Python torch module: cat() example source code

We have extracted the following code examples from open-source Python projects to illustrate how to use torch.cat().
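
As a quick orientation before the project examples, here is a minimal standalone sketch (not taken from any project below): torch.cat joins a sequence of tensors along an existing dimension, so the tensors must agree in every dimension except the one being concatenated.

import torch

a = torch.ones(2, 3)
b = torch.zeros(2, 3)

print(torch.cat((a, b), 0).size())  # torch.Size([4, 3]): rows are stacked
print(torch.cat((a, b), 1).size())  # torch.Size([2, 6]): columns are stacked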

Project: MMD-GAN    Author: OctoberChang
def _mix_rbf_kernel(X, Y, sigma_list):
    assert(X.size(0) == Y.size(0))
    m = X.size(0)

    Z = torch.cat((X, Y), 0)
    ZZT = torch.mm(Z, Z.t())
    diag_ZZT = torch.diag(ZZT).unsqueeze(1)
    Z_norm_sqr = diag_ZZT.expand_as(ZZT)
    exponent = Z_norm_sqr - 2 * ZZT + Z_norm_sqr.t()

    K = 0.0
    for sigma in sigma_list:
        gamma = 1.0 / (2 * sigma**2)
        K += torch.exp(-gamma * exponent)

    return K[:m, :m], K[:m, m:], K[m:, m:], len(sigma_list)
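
For illustration, a hedged usage sketch of the helper above (shapes are hypothetical, not taken from MMD-GAN):

X = torch.randn(64, 10)
Y = torch.randn(64, 10)
K_XX, K_XY, K_YY, d = _mix_rbf_kernel(X, Y, sigma_list=[1.0, 2.0, 4.0])
# K_XX, K_XY and K_YY are each (64, 64); d == len(sigma_list) == 3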
Project: DREAM    Author: LaceyChen17
def forward(self, x, lengths, hidden):
        # Basket Encoding 
        ub_seqs = [] # users' basket sequence
        for user in x: # x shape: (batch of users, time_step, product indices) nested lists
            embed_baskets = []
            for basket in user:
                basket = torch.LongTensor(basket).resize_(1, len(basket))
                basket = basket.cuda() if self.config.cuda else basket # use cuda for acceleration
                basket = self.encode(torch.autograd.Variable(basket)) # shape: 1, len(basket), embedding_dim
                embed_baskets.append(self.pool(basket, dim = 1))
            # concat current user's all baskets and append it to users' basket sequence
            ub_seqs.append(torch.cat(embed_baskets, 1)) # shape: 1, num_basket, embedding_dim

        # Input for rnn 
        ub_seqs = torch.cat(ub_seqs, 0).cuda() if self.config.cuda else torch.cat(ub_seqs, 0) # shape: batch_size, max_len, embedding_dim
        packed_ub_seqs = torch.nn.utils.rnn.pack_padded_sequence(ub_seqs, lengths, batch_first=True) # packed sequence as required by pytorch

        # RNN
        output, h_u = self.rnn(packed_ub_seqs, hidden)
        dynamic_user, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=True) # shape: batch_size, max_len, embedding_dim
        return dynamic_user, h_u
Project: Structured-Self-Attentive-Sentence-Embedding    Author: ExplorerFreda
def forward(self, inp, hidden):
        outp = self.bilstm.forward(inp, hidden)[0]
        size = outp.size()  # [bsz, len, nhid*2]
        compressed_embeddings = outp.view(-1, size[2])  # [bsz*len, nhid*2]
        transformed_inp = torch.transpose(inp, 0, 1).contiguous()  # [bsz, len]
        transformed_inp = transformed_inp.view(size[0], 1, size[1])  # [bsz, 1, len]
        concatenated_inp = [transformed_inp for i in range(self.attention_hops)]
        concatenated_inp = torch.cat(concatenated_inp, 1)  # [bsz, hop, len]

        hbar = self.tanh(self.ws1(self.drop(compressed_embeddings)))  # [bsz*len, attention-unit]
        alphas = self.ws2(hbar).view(size[0], size[1], -1)  # [bsz, len, hop]
        alphas = torch.transpose(alphas, 1, 2).contiguous()  # [bsz, hop, len]
        penalized_alphas = alphas + (
            -10000 * (concatenated_inp == self.dictionary.word2idx['<pad>']).float())
            # [bsz, hop, len] + [bsz, hop, len]
        alphas = self.softmax(penalized_alphas.view(-1, size[1]))  # [bsz*hop, len]
        alphas = alphas.view(size[0], self.attention_hops, size[1])  # [bsz, hop, len]
        return torch.bmm(alphas, outp), alphas
Project: crnn    Author: wulivicte
def __call__(self, batch):
        images, labels = zip(*batch)

        imgH = self.imgH
        imgW = self.imgW
        if self.keep_ratio:
            ratios = []
            for image in images:
                w, h = image.size
                ratios.append(w / float(h))
            ratios.sort()
            max_ratio = ratios[-1]
            imgW = int(np.floor(max_ratio * imgH))
            imgW = max(imgH * self.min_ratio, imgW)  # ensure imgW >= imgH * min_ratio

        transform = resizeNormalize((imgW, imgH))
        images = [transform(image) for image in images]
        images = torch.cat([t.unsqueeze(0) for t in images], 0)

        return images, labels
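
A side note (not part of the original snippet): the unsqueeze-then-cat line above is equivalent to stacking along a new leading dimension:

images = torch.stack(images, 0)  # same result as torch.cat([t.unsqueeze(0) for t in images], 0)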
Project: tensorboard    Author: dmlc
def make_sprite(label_img, save_path):
    import math
    import torch
    import torchvision
    # this ensures the sprite image has the correct dimensions, as described in
    # https://www.tensorflow.org/get_started/embedding_viz
    nrow = int(math.ceil((label_img.size(0)) ** 0.5))

    # augment images so that #images equals nrow*nrow
    label_img = torch.cat((label_img, torch.randn(nrow ** 2 - label_img.size(0), *label_img.size()[1:]) * 255), 0)

    # Dirty fix: no pixels are appended by the make_grid call in save_image (https://github.com/pytorch/vision/issues/206)
    xx = torchvision.utils.make_grid(torch.Tensor(1, 3, 32, 32), padding=0)
    if xx.size(2) == 33:
        sprite = torchvision.utils.make_grid(label_img, nrow=nrow, padding=0)
        sprite = sprite[:, 1:, 1:]
        torchvision.utils.save_image(sprite, os.path.join(save_path, 'sprite.png'))
    else:
        torchvision.utils.save_image(label_img, os.path.join(save_path, 'sprite.png'), nrow=nrow, padding=0)
Project: SGAN    Author: YuhangSong
def log_img(x,name,iteration=0,nrow=8):

    def log_img_final(x,name,iteration=0,nrow=8):
        vutils.save_image(
            x,
            LOGDIR+name+'_'+str(iteration)+'.png',
            nrow=nrow,
        )
        vis.images( 
            x.cpu().numpy(),
            win=str(MULTI_RUN)+'-'+name,
            opts=dict(caption=str(MULTI_RUN)+'-'+name+'_'+str(iteration)),
            nrow=nrow,
        )

    if params['REPRESENTATION']==chris_domain.VECTOR:
        x = vector2image(x)
    x = x.squeeze(1)
    if params['DOMAIN']=='2Dgrid':
        if x.size()[1]==2:
            log_img_final(x[:,0:1,:,:],name+'_b',iteration,nrow)
            log_img_final(x[:,1:2,:,:],name+'_a',iteration,nrow)
            x = torch.cat([x,x[:,0:1,:,:]],1)
    log_img_final(x,name,iteration,nrow)
Project: PaintsPytorch    Author: orashi
def forward(self, x, hint):
        v = self.toH(hint)

        x0 = self.to0(x)
        x1 = self.to1(x0)
        x2 = self.to2(x1)
        x3 = self.to3(torch.cat([x2, v], 1))
        x4 = self.to4(x3)

        x = self.tunnel4(x4)

        x = self.tunnel3(torch.cat([x, x3.detach()], 1))
        x = self.tunnel2(torch.cat([x, x2.detach()], 1))
        x = self.tunnel1(torch.cat([x, x1.detach()], 1))
        x = F.tanh(self.exit(torch.cat([x, x0.detach()], 1)))
        return x
Project: pytorch.rl.learning    Author: moskomule
def _loop(self):
        done = False
        total_reward, reward, iter = 0, 0, 0
        self.state = self.env.reset()
        while not done:
            action = self.policy()
            _state, reward, done, _ = self.env.step(action)
            # if _state is terminal, state value is 0
            v = 0 if done else self.state_value(_state)
            delta = reward + self.gamma * v - self.state_value(self.state)
            # \nabla_w v = s, since v = s^{\top} w
            self.state_value_weight += self.beta * delta * to_tensor(self.state).float()
            # \pi(a) = x^{\top}(a)w, where x is feature and w is weight
            # \nabla\ln\pi(a) = x(a) - \sum_b \pi(b)x(b)
            direction = self.feature(_state, action) - sum(
                    [self.softmax @ torch.cat([self.feature(_state, a).unsqueeze(0) for a in self.actions])])

            self.weight += self.alpha * pow(self.gamma, iter) * delta * direction
            total_reward += reward
            self.state = _state
            iter += 1
        return total_reward
Project: pytorch.rl.learning    Author: moskomule
def _loop(self):
        done = False
        total_reward, reward, iter = 0, 0, 0
        self.state = self.env.reset()
        weight = self.weight
        while not done:
            action = self.policy()
            _state, reward, done, _ = self.env.step(action)
            # use current weight to generate an episode
            # \pi(a) = x^{\top}(a)w, where x is feature and w is weight
            # \nabla\ln\pi(a) = x(a) - \sum_b \pi(b)x(b)
            delta = reward - self.state_value(_state)
            self.state_value_weight += self.beta * delta * to_tensor(_state).float()
            direction = self.feature(_state, action) - sum(
                [self.softmax @ torch.cat([self.feature(_state, a).unsqueeze(0) for a in self.actions])])
            weight += self.alpha * pow(self.gamma, iter) * delta * direction
            total_reward += reward
            iter += 1
        # update weight
        self.weight = weight
        return total_reward
Project: pytorch.rl.learning    Author: moskomule
def _loop(self):
        done = False
        total_reward, reward, iter = 0, 0, 0
        self.state = self.env.reset()
        weight = self.weight
        while not done:
            action = self.policy()
            _state, reward, done, _ = self.env.step(action)
            # use current weight to generate an episode
            # \pi(a) = x^{\top}(a)w, where x is feature and w is weight
            # \nabla\ln\pi(a) = x(a) - \sum_b \pi(b)x(b)
            direction = self.feature(_state, action) - sum(
                [self.softmax @ torch.cat([self.feature(_state, a).unsqueeze(0) for a in self.actions])])
            weight += self.alpha * pow(self.gamma, iter) * reward * direction
            total_reward += reward
            iter += 1
        # update weight
        self.weight = weight
        return total_reward
Project: colorNet-pytorch    Author: shufanwu
def forward(self, mid_input, global_input):
        w = mid_input.size()[2]
        h = mid_input.size()[3]
        global_input = global_input.unsqueeze(2).unsqueeze(2).expand_as(mid_input)
        fusion_layer = torch.cat((mid_input, global_input), 1)
        fusion_layer = fusion_layer.permute(2, 3, 0, 1).contiguous()
        fusion_layer = fusion_layer.view(-1, 512)
        fusion_layer = self.bn1(self.fc1(fusion_layer))
        fusion_layer = fusion_layer.view(w, h, -1, 256)

        x = fusion_layer.permute(2, 3, 0, 1).contiguous()
        x = F.relu(self.bn2(self.conv1(x)))
        x = self.upsample(x)
        x = F.relu(self.bn3(self.conv2(x)))
        x = F.relu(self.bn4(self.conv3(x)))
        x = self.upsample(x)
        x = F.sigmoid(self.bn5(self.conv4(x)))
        x = self.upsample(self.conv5(x))
        return x
Project: DistanceGAN    Author: sagiebenaim
def query(self, images):
        if self.pool_size == 0:
            return images
        return_images = []
        for image in images.data:
            image = torch.unsqueeze(image, 0)
            if self.num_imgs < self.pool_size:
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                if p > 0.5:
                    random_id = random.randint(0, self.pool_size-1)
                    tmp = self.images[random_id].clone()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:
                    return_images.append(image)
        return_images = Variable(torch.cat(return_images, 0))
        return return_images
Project: pytorch-dist    Author: apaszke
def default_collate(batch):
    "Puts each data field into a tensor with outer dimension batch size"
    if torch.is_tensor(batch[0]):
        return torch.cat([t.view(1, *t.size()) for t in batch], 0)
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], str):
        return batch
    elif isinstance(batch[0], collections.Iterable):
        # if each batch element is not a tensor, then it should be a tuple
        # of tensors; in that case we collate each element in the tuple
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]

    raise TypeError(("batch must contain tensors, numbers, or lists; found {}"
                     .format(type(batch[0]))))
Project: pytorch-dist    Author: apaszke
def _numerical_jacobian(self, module, input, jacobian_input=True, jacobian_parameters=True):
        output = self._forward(module, input)
        output_size = output.nelement()

        if jacobian_parameters:
            param, d_param = self._get_parameters(module)

        def fw(input):
            out = self._forward(module, input)
            if isinstance(out, Variable):
                return out.data
            return out

        res = tuple()
        # TODO: enable non-contig tests
        input = contiguous(input)
        if jacobian_input:
            res += get_numerical_jacobian(fw, input, input),
        if jacobian_parameters:
            res += torch.cat(list(get_numerical_jacobian(fw, input, p) for p in param), 0),
        return res
Project: pytorch-dist    Author: apaszke
def test_cat(self):
        SIZE = 10
        # 2-arg cat
        for dim in range(3):
            x = torch.rand(13, SIZE, SIZE).transpose(0, dim)
            y = torch.rand(17, SIZE, SIZE).transpose(0, dim)
            res1 = torch.cat((x, y), dim)
            self.assertEqual(res1.narrow(dim, 0, 13), x, 0)
            self.assertEqual(res1.narrow(dim, 13, 17), y, 0)

        # Check iterables
        for dim in range(3):
            x = torch.rand(13, SIZE, SIZE).transpose(0, dim)
            y = torch.rand(17, SIZE, SIZE).transpose(0, dim)
            z = torch.rand(19, SIZE, SIZE).transpose(0, dim)

            res1 = torch.cat((x, y, z), dim)
            self.assertEqual(res1.narrow(dim, 0, 13), x, 0)
            self.assertEqual(res1.narrow(dim, 13, 17), y, 0)
            self.assertEqual(res1.narrow(dim, 30, 19), z, 0)
            self.assertRaises(ValueError, lambda: torch.cat([]))
Project: a3c-mujoco    Author: Feryal
def forward(self, non_rgb_state, rgb_state, h):
        x = self.relu(self.conv1(rgb_state))
        x = self.relu(self.conv2(x))
        x = x.view(x.size(0), -1)
        x = self.fc1(torch.cat((x, non_rgb_state), 1))
        h = self.lstm(x, h)  # h is (hidden state, cell state)
        x = h[0]
        policy1 = self.softmax(self.fc_actor1(x)).clamp(
            max=1 - 1e-20)  # Prevent 1s and hence NaNs
        policy2 = self.softmax(self.fc_actor2(x)).clamp(max=1 - 1e-20)
        policy3 = self.softmax(self.fc_actor3(x)).clamp(max=1 - 1e-20)
        policy4 = self.softmax(self.fc_actor4(x)).clamp(max=1 - 1e-20)
        policy5 = self.softmax(self.fc_actor5(x)).clamp(max=1 - 1e-20)
        policy6 = self.softmax(self.fc_actor6(x)).clamp(max=1 - 1e-20)
        V = self.fc_critic(x)
        return (policy1, policy2, policy3, policy4, policy5, policy6), V, h
Project: ssd.pytorch    Author: amdegroot
def encode(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """

    # dist b/t match center and prior's center
    g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, 2:])
    # match wh / prior wh
    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    g_wh = torch.log(g_wh) / variances[1]
    # return target for smooth_l1_loss
    return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]


# Adapted from https://github.com/Hakuyume/chainer-ssd
Project: ssd.pytorch    Author: amdegroot
def decode(loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors,4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """

    boxes = torch.cat((
        priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
        priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes
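
As a sanity check, decode inverts encode; a minimal round-trip sketch with hypothetical boxes (not part of ssd.pytorch):

priors = torch.Tensor([[0.5, 0.5, 0.2, 0.2]])   # center-offset form: (cx, cy, w, h)
matched = torch.Tensor([[0.4, 0.4, 0.6, 0.6]])  # point form: (xmin, ymin, xmax, ymax)
variances = [0.1, 0.2]
recovered = decode(encode(matched, priors, variances), priors, variances)
# recovered is approximately [[0.4, 0.4, 0.6, 0.6]]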
Project: DeblurGAN    Author: KupynOrest
def query(self, images):
        if self.pool_size == 0:
            return images
        return_images = []
        for image in images.data:
            image = torch.unsqueeze(image, 0)
            if self.num_imgs < self.pool_size:
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                if p > 0.5:
                    random_id = random.randint(0, self.pool_size-1)
                    tmp = self.images[random_id].clone()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:
                    return_images.append(image)
        return_images = Variable(torch.cat(return_images, 0))
        return return_images
Project: kaggle-planet    Author: ZijunDeng
def validate(val_loader, net, criterion):
    net.eval()
    batch_outputs = []
    batch_labels = []
    for vi, data in enumerate(val_loader, 0):
        inputs, labels = data
        inputs = Variable(inputs, volatile=True).cuda()
        labels = Variable(labels.float(), volatile=True).cuda()

        outputs = net(inputs)

        batch_outputs.append(outputs)
        batch_labels.append(labels)

    batch_outputs = torch.cat(batch_outputs)
    batch_labels = torch.cat(batch_labels)
    val_loss = criterion(batch_outputs, batch_labels)
    val_loss = val_loss.data[0]

    print('--------------------------------------------------------')
    print('[val_loss %.4f]' % val_loss)
    net.train()
    return val_loss
Project: allennlp    Author: allenai
def test_elmo_4D_input(self):
        sentences = [[['The', 'sentence', '.'],
                      ['ELMo', 'helps', 'disambiguate', 'ELMo', 'from', 'Elmo', '.']],
                     [['1', '2'], ['1', '2', '3', '4', '5', '6', '7']],
                     [['1', '2', '3', '4', '50', '60', '70'], ['The']]]

        all_character_ids = []
        for batch_sentences in sentences:
            all_character_ids.append(self._sentences_to_ids(batch_sentences))

        # (2, 3, 7, 50)
        character_ids = torch.cat([ids.unsqueeze(1) for ids in all_character_ids], dim=1)
        embeddings_4d = self.elmo(character_ids)

        # Run the individual batches.
        embeddings_3d = []
        for char_ids in all_character_ids:
            self.elmo._elmo_lstm._elmo_lstm.reset_states()
            embeddings_3d.append(self.elmo(char_ids))

        for k in range(3):
            numpy.testing.assert_array_almost_equal(
                    embeddings_4d['elmo_representations'][0][:, k, :, :].data.numpy(),
                    embeddings_3d[k]['elmo_representations'][0].data.numpy()
            )
Project: deep-text-corrector    Author: andabi
def forward(self, input, last_context, last_hidden, encoder_outputs):
        # input.size() = (B, 1), last_context.size() = (B, H), last_hidden.size() = (L, B, H), encoder_outputs.size() = (B, S, H)
        # word_embedded.size() = (B, 1, H)
        # print input.size()
        word_embedded = self.embedding(input)

        # rnn_input.size() = (B, 1, 2H), rnn_output.size() = (B, 1, H)
        # print word_embedded.size(), last_context.unsqueeze(1).size()
        rnn_input = torch.cat((word_embedded, last_context.unsqueeze(1)), -1)
        rnn_output, hidden = self.gru(rnn_input, last_hidden)
        rnn_output = rnn_output.squeeze(1)  # B x S=1 x H -> B x H

        # atten_weights.size() = (B, S)
        attn_weights = self.attn(rnn_output, encoder_outputs)
        context = attn_weights.unsqueeze(1).bmm(encoder_outputs).squeeze(1)  # B x H

        # TODO tanh?
        # Final output layer (next word prediction) using the RNN hidden state and context vector
        output = self.out(torch.cat((rnn_output, context), -1))  # B x V

        # Return final output, hidden state, and attention weights (for visualization)
        # output.size() = (B, V)
        return output, context, hidden, attn_weights
Project: DenseNet    Author: kevinzakka
def forward(self, x):
        """
        Compute the forward pass of the composite transformation H(x),
        where x is the concatenation of the current and all preceding
        feature maps.
        """
        if self.bottleneck:
            out = self.conv1(F.relu(self.bn1(x)))
            if self.p > 0:
                out = F.dropout(out, p=self.p, training=self.training)
            out = self.conv2(F.relu(self.bn2(out)))
            if self.p > 0:
                out = F.dropout(out, p=self.p, training=self.training)
        else:
            out = self.conv2(F.relu(self.bn2(x)))
            if self.p > 0:
                out = F.dropout(out, p=self.p, training=self.training)  
        return torch.cat((x, out), 1)
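
The torch.cat along dim 1 is what makes the block "dense": each layer's new feature maps are appended to its input along the channel axis. A standalone shape sketch (hypothetical sizes):

x = torch.randn(1, 16, 8, 8)     # incoming feature maps
out = torch.randn(1, 12, 8, 8)   # growth_rate = 12 new feature maps
print(torch.cat((x, out), 1).size())  # torch.Size([1, 28, 8, 8]): channels accumulate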
Project: torch_light    Author: ne7ermore
def forward(self, q, k, v, attn_mask):
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        residual = q

        bsz, len_q, d_model = q.size()
        len_k, len_v = k.size(1), v.size(1)

        def reshape(x):
            """[bsz, len, d_*] -> [n_head x (bsz*len) x d_*]"""
            return x.repeat(n_head, 1, 1).view(n_head, -1, d_model)

        q_s, k_s, v_s = map(reshape, [q, k, v])

        q_s = torch.bmm(q_s, self.w_qs).view(-1, len_q, d_k)
        k_s = torch.bmm(k_s, self.w_ks).view(-1, len_k, d_k)
        v_s = torch.bmm(v_s, self.w_vs).view(-1, len_v, d_v)

        outputs = self.attention(q_s, k_s, v_s, attn_mask.repeat(n_head, 1, 1))
        outputs = torch.cat(torch.split(outputs, bsz, dim=0), dim=-1).view(-1, n_head*d_v)
        outputs = F.dropout(self.w_o(outputs), p=self.dropout).view(bsz, len_q, -1)
        return self.lm(outputs + residual)
Project: torch_light    Author: ne7ermore
def _score_sentence(self, input, tags):
        bsz, sent_len, l_size = input.size()
        score = Variable(self.torch.FloatTensor(bsz).fill_(0.))
        s_score = Variable(self.torch.LongTensor([[START]]*bsz))

        tags = torch.cat([s_score, tags], dim=-1)
        input_t = input.transpose(0, 1)

        for i, words in enumerate(input_t):
            temp = self.transitions.index_select(1, tags[:, i])
            bsz_t = gather_index(temp.transpose(0, 1), tags[:, i + 1])
            w_step_score = gather_index(words, tags[:, i+1])
            score = score + bsz_t + w_step_score

        temp = self.transitions.index_select(1, tags[:, -1])
        bsz_t = gather_index(temp.transpose(0, 1),
                    Variable(self.torch.LongTensor([STOP]*bsz)))
        return score+bsz_t
Project: torch_light    Author: ne7ermore
def forward(self, input):
        bsz, sent_len, l_size = input.size()
        init_alphas = self.torch.FloatTensor(bsz, self.label_size).fill_(-10000.)
        init_alphas[:, START].fill_(0.)
        forward_var = Variable(init_alphas)

        input_t = input.transpose(0, 1)
        for words in input_t:
            alphas_t = []
            for next_tag in range(self.label_size):
                emit_score = words[:, next_tag].contiguous()
                emit_score = emit_score.unsqueeze(1).expand_as(words)

                trans_score = self.transitions[next_tag, :].view(1, -1).expand_as(words)
                next_tag_var = forward_var + trans_score + emit_score
                alphas_t.append(log_sum_exp(next_tag_var, True))
            forward_var = torch.cat(alphas_t, dim=-1)

        return log_sum_exp(forward_var)
Project: torch_light    Author: ne7ermore
def _word_repre_layer(self, input):
        """
        args:
            - input: (q_sentence, q_words)|(a_sentence, a_words)
              q_sentence - [batch_size, sent_length]
              q_words - [batch_size, sent_length, words_len]
        return:
            - output: [batch_size, sent_length, context_dim]
        """
        sentence, words = input
        # [batch_size, sent_length, corpus_emb_dim]
        s_encode = self.corpus_emb(sentence)

        # [batch_size, sent_length, word_lstm_dim]
        w_encode = self._word_repre_forward(words)
        w_encode = F.dropout(w_encode, p=self.dropout, training=True, inplace=False)

        out = torch.cat((s_encode, w_encode), 2)
        return out
Project: torch_light    Author: ne7ermore
def _aggre(self, q_aware_reps, a_aware_reps):
        """
        Aggregation Layer handle
        Args:
            q_aware_reps - [batch_size, question_len, 11*mp_dim+6]
            a_aware_reps - [batch_size, answer_len, 11*mp_dim+6]
        Return:
            size - [batch_size, aggregation_lstm_dim*4]
        """
        _aggres = []
        _, (q_hidden, _) = self.aggre_lstm(q_aware_reps)
        _, (a_hidden, _) = self.aggre_lstm(a_aware_reps)

        # [batch_size, aggregation_lstm_dim]
        _aggres.append(q_hidden[-2])
        _aggres.append(q_hidden[-1])
        _aggres.append(a_hidden[-2])
        _aggres.append(a_hidden[-1])
        return torch.cat(_aggres, dim=1)
Project: torch_light    Author: ne7ermore
def __next__(self):
        def img2variable(img_files):
            tensors = [self._encode(Image.open(self._path + img_name)).unsqueeze(0)
                    for img_name in img_files]
            v = Variable(torch.cat(tensors, 0))
            if self._is_cuda: v = v.cuda()
            return v

        if self._step == self._stop_step:
            self._step = 0
            raise StopIteration()       

        _start = self._step*self._batch_size
        self._step += 1

        return img2variable(self._img_files[_start:_start+self._batch_size])
Project: Pytorch-Sketch-RNN    Author: alexis-jacq
def __init__(self):
        self.data_location = 'cat.npz'
        self.enc_hidden_size = 256
        self.dec_hidden_size = 512
        self.Nz = 128
        self.M = 20
        self.dropout = 0.9
        self.batch_size = 100
        self.eta_min = 0.01
        self.R = 0.99995
        self.KL_min = 0.2
        self.wKL = 0.5
        self.lr = 0.001
        self.lr_decay = 0.9999
        self.min_lr = 0.00001
        self.grad_clip = 1.
        self.temperature = 0.4
        self.max_seq_length = 200
Project: Pytorch-Sketch-RNN    Author: alexis-jacq
def forward(self, inputs, batch_size, hidden_cell=None):
        if hidden_cell is None:
            # then must init with zeros
            if use_cuda:
                hidden = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size).cuda())
                cell = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size).cuda())
            else:
                hidden = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size))
                cell = Variable(torch.zeros(2, batch_size, hp.enc_hidden_size))
            hidden_cell = (hidden, cell)
        _, (hidden,cell) = self.lstm(inputs.float(), hidden_cell)
        # hidden is (2, batch_size, hidden_size), we want (batch_size, 2*hidden_size):
        hidden_forward, hidden_backward = torch.split(hidden,1,0)
        hidden_cat = torch.cat([hidden_forward.squeeze(0), hidden_backward.squeeze(0)],1)
        # mu and sigma:
        mu = self.fc_mu(hidden_cat)
        sigma_hat = self.fc_sigma(hidden_cat)
        sigma = torch.exp(sigma_hat/2.)
        # N ~ N(0,1)
        z_size = mu.size()
        if use_cuda:
            N = Variable(torch.normal(torch.zeros(z_size),torch.ones(z_size)).cuda())
        else:
            N = Variable(torch.normal(torch.zeros(z_size),torch.ones(z_size)))
        z = mu + sigma*N
        # mu and sigma_hat are needed for LKL loss
        return z, mu, sigma_hat
Project: Pytorch-Sketch-RNN    Author: alexis-jacq
def make_target(self, batch, lengths):
        if use_cuda:
            eos = Variable(torch.stack([torch.Tensor([0,0,0,0,1])]\
                *batch.size()[1]).cuda()).unsqueeze(0)
        else:
            eos = Variable(torch.stack([torch.Tensor([0,0,0,0,1])]\
                *batch.size()[1])).unsqueeze(0)
        batch = torch.cat([batch, eos], 0)
        mask = torch.zeros(Nmax+1, batch.size()[1])
        for indice,length in enumerate(lengths):
            mask[:length,indice] = 1
        if use_cuda:
            mask = Variable(mask.cuda()).detach()
        else:
            mask = Variable(mask).detach()
        dx = torch.stack([Variable(batch.data[:,:,0])]*hp.M,2).detach()
        dy = torch.stack([Variable(batch.data[:,:,1])]*hp.M,2).detach()
        p1 = Variable(batch.data[:,:,2]).detach()
        p2 = Variable(batch.data[:,:,3]).detach()
        p3 = Variable(batch.data[:,:,4]).detach()
        p = torch.stack([p1,p2,p3],2)
        return mask,dx,dy,p
Project: multiNLI_encoder    Author: easonnie
def select_last(inputs, lengths, hidden_size):
    """
    :param inputs: [T * B * D] D = 2 * hidden_size
    :param lengths: [B]
    :param hidden_size: dimension 
    :return:  [B * D]
    """
    batch_size = inputs.size(1)
    batch_out_list = []
    for b in range(batch_size):
        batch_out_list.append(torch.cat((inputs[lengths[b] - 1, b, :hidden_size],
                                         inputs[0, b, hidden_size:])
                                        )
                              )

    out = torch.stack(batch_out_list)
    return out
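
A usage sketch with hypothetical sizes: for a BiLSTM output of shape [T, B, 2*hidden_size], each concatenation pairs the forward state at the last valid step with the backward state at step 0.

inputs = torch.randn(7, 2, 10)   # T=7, B=2, D=10 with hidden_size=5
lengths = [5, 7]
print(select_last(inputs, lengths, 5).size())  # torch.Size([2, 10])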
Project: multiNLI_encoder    Author: easonnie
def pack_to_matching_matrix(s1, s2, cat_only=[False, False]):
    t1 = s1.size(0)
    t2 = s2.size(0)
    batch_size = s1.size(1)
    d = s1.size(2)

    expanded_p_s1 = s1.expand(t2, t1, batch_size, d)

    expanded_p_s2 = s2.view(t2, 1, batch_size, d)
    expanded_p_s2 = expanded_p_s2.expand(t2, t1, batch_size, d)

    if not cat_only[0] and not cat_only[1]:
        matrix = torch.cat((expanded_p_s1, expanded_p_s2), dim=3)
    elif not cat_only[0] and cat_only[1]:
        matrix = torch.cat((expanded_p_s1, expanded_p_s2, expanded_p_s1 * expanded_p_s2), dim=3)
    else:
        matrix = torch.cat((expanded_p_s1,
                            expanded_p_s2,
                            torch.abs(expanded_p_s1 - expanded_p_s2),
                            expanded_p_s1 * expanded_p_s2), dim=3)

    # matrix = torch.cat((expanded_p_s1,
    #                     expanded_p_s2), dim=3)

    return matrix
Project: pyro    Author: uber
def expand_z_where(z_where):
    # Take a batch of three-vectors and massage them into a batch of
    # 2x3 matrices with elements like so:
    # [s,x,y] -> [[s,0,x],
    #             [0,s,y]]
    n = z_where.size(0)
    out = torch.cat((ng_zeros([1, 1]).type_as(z_where).expand(n, 1), z_where), 1)
    ix = Variable(expansion_indices)
    if z_where.is_cuda:
        ix = ix.cuda()
    out = torch.index_select(out, 1, ix)
    out = out.view(n, 2, 3)
    return out


# Scaling by `1/scale` here is unsatisfactory, as `scale` could be
# zero.
Project: inferno    Author: inferno-pytorch
def forward(self, *inputs):
        dim = inputs[0].dim()
        assert_(dim in [4, 5],
                'Input tensors must either be 4 or 5 '
                'dimensional, but inputs[0] is {}D.'.format(dim),
                ShapeError)
        # Get resize function
        spatial_dim = {4: 2, 5: 3}[dim]
        resize_function = getattr(F, 'adaptive_{}_pool{}d'.format(self.pool_mode,
                                                                  spatial_dim))
        target_size = pyu.as_tuple_of_len(self.target_size, spatial_dim)
        # Do the resizing
        resized_inputs = []
        for input_num, input in enumerate(inputs):
            # Make sure the dim checks out
            assert_(input.dim() == dim,
                    "Expected inputs[{}] to be a {}D tensor, got a {}D "
                    "tensor instead.".format(input_num, dim, input.dim()),
                    ShapeError)
            resized_inputs.append(resize_function(input, target_size))
        # Concatenate along the channel axis
        concatenated = torch.cat(tuple(resized_inputs), 1)
        # Done
        return concatenated
Project: jack    Author: uclmr
def __init__(self, shared_resources: SharedResources):
        super(FastQAPyTorchModule, self).__init__()
        self._shared_resources = shared_resources
        input_size = shared_resources.config["repr_dim_input"]
        size = shared_resources.config["repr_dim"]
        self._size = size
        self._with_char_embeddings = self._shared_resources.config.get("with_char_embeddings", False)

        # modules & parameters
        if self._with_char_embeddings:
            self._conv_char_embedding = embedding.ConvCharEmbeddingModule(
                len(shared_resources.char_vocab), size)
            self._embedding_projection = nn.Linear(size + input_size, size)
            self._embedding_highway = Highway(size, 1)
            self._v_wiq_w = nn.Parameter(torch.ones(1, 1, input_size + size))
            input_size = size
        else:
            self._v_wiq_w = nn.Parameter(torch.ones(1, 1, input_size))

        self._bilstm = BiLSTM(input_size + 2, size)
        self._answer_layer = FastQAAnswerModule(shared_resources)

        # [size, 2 * size]
        self._question_projection = nn.Parameter(torch.cat([torch.eye(size), torch.eye(size)], dim=1))
        self._support_projection = nn.Parameter(torch.cat([torch.eye(size), torch.eye(size)], dim=1))
Project: jack    Author: uclmr
def backward(ctx, grad_outputs):
        size = grad_outputs.size(1)
        segm_sorted = torch.sort(ctx.rev_segm_sorted)[1]
        grad_outputs = torch.index_select(grad_outputs, 0, segm_sorted)

        offset = [ctx.num_zeros]

        def backward_segment(l, n):
            segment_grad = grad_outputs.narrow(0, offset[0], n // l)
            if l > 1:
                segment_grad = _MyMax.backward(ctx.maxes[l], segment_grad)[0].view(n, size)
            offset[0] += n // l
            return segment_grad

        segment_grads = [backward_segment(l, n) for l, n in enumerate(ctx.num_lengths) if n > 0]
        grads = torch.cat(segment_grads, 0)
        rev_length_sorted = torch.sort(ctx.lengths_sorted)[1]
        grads = torch.index_select(grads, 0, rev_length_sorted)

        return grads, None, None, None
Project: MP-CNN-Variants    Author: tuzhucheng
def forward(self, sent1_idx, sent2_idx, ext_feats=None):
        # Select embedding
        sent1 = self.embedding(sent1_idx).transpose(1, 2)
        sent2 = self.embedding(sent2_idx).transpose(1, 2)

        # Sentence modeling module
        sent1_block_a, sent1_block_b = self._get_blocks_for_sentence(sent1)
        sent2_block_a, sent2_block_b = self._get_blocks_for_sentence(sent2)

        # Similarity measurement layer
        feat_h = self._algo_1_horiz_comp(sent1_block_a, sent2_block_a)
        feat_v = self._algo_2_vert_comp(sent1_block_a, sent2_block_a, sent1_block_b, sent2_block_b)
        combined_feats = [feat_h, feat_v, ext_feats] if self.ext_feats else [feat_h, feat_v]
        feat_all = torch.cat(combined_feats, dim=1)

        preds = self.final_layers(feat_all)
        return preds
Project: sceneReco    Author: bear63
def __call__(self, batch):
        images, labels = zip(*batch)

        imgH = self.imgH
        imgW = self.imgW
        if self.keep_ratio:
            ratios = []
            for image in images:
                w, h = image.size
                ratios.append(w / float(h))
            ratios.sort()
            max_ratio = ratios[-1]
            imgW = int(np.floor(max_ratio * imgH))
            imgW = max(imgH * self.min_ratio, imgW)  # ensure imgW >= imgH * min_ratio

        transform = resizeNormalize((imgW, imgH))
        images = [transform(image) for image in images]
        images = torch.cat([t.unsqueeze(0) for t in images], 0)

        return images, labels
Project: pytorch-skipthoughts    Author: kaniblu
def prepare_batches(self, batch_data, chunks, **kwargs):
        x, x_lens, ys, ys_lens = batch_data
        batch_dim = 0 if self.batch_first else 1

        x_list = x.chunk(chunks, 0)
        x_lens_list = x_lens.chunk(chunks, 0)
        ys_list = ys.chunk(chunks, batch_dim)
        ys_lens_list = ys_lens.chunk(chunks, batch_dim)
        inp_list = [x_list, x_lens_list, ys_list, ys_lens_list]

        data_list = []
        for inp in zip(*inp_list):
            data = self.prepare_batch(inp, **kwargs)
            data_list.append(data)

        data_list = list(zip(*data_list))
        ret_list = []

        for data in data_list:
            data = [d.unsqueeze(0) for d in data]
            data = torch.cat(data)
            ret_list.append(data)

        return ret_list
Project: simple-pix2pix-pytorch    Author: Eiji-Kb
def forward(self, x):           
        en0 = self.c0(x)
        en1 = self.bnc1(self.c1(F.leaky_relu(en0, negative_slope=0.2)))
        en2 = self.bnc2(self.c2(F.leaky_relu(en1, negative_slope=0.2)))
        en3 = self.bnc3(self.c3(F.leaky_relu(en2, negative_slope=0.2)))
        en4 = self.bnc4(self.c4(F.leaky_relu(en3, negative_slope=0.2)))
        en5 = self.bnc5(self.c5(F.leaky_relu(en4, negative_slope=0.2)))
        en6 = self.bnc6(self.c6(F.leaky_relu(en5, negative_slope=0.2)))
        en7 = self.c7(F.leaky_relu(en6, negative_slope=0.2))

        de7 = self.bnd7(self.d7(F.relu(en7)))
        de6 = F.dropout(self.bnd6(self.d6(F.relu(torch.cat((en6, de7),1)))))
        de5 = F.dropout(self.bnd5(self.d5(F.relu(torch.cat((en5, de6),1)))))

        de4 = F.dropout(self.bnd4(self.d4(F.relu(torch.cat((en4, de5),1)))))
        de3 = self.bnd3(self.d3(F.relu(torch.cat((en3, de4),1))))
        de2 = self.bnd2(self.d2(F.relu(torch.cat((en2, de3),1))))
        de1 = self.bnd1(self.d1(F.relu(torch.cat((en1, de2),1))))

        de0 = F.tanh(self.d0(F.relu(torch.cat((en0, de1),1))))       

        return de0
Project: drl.pth    Author: seba-1511
def discount(rewards, gamma):
    tensor = False
    if not isinstance(rewards, list):
        tensor = True
        rewards = rewards.split(1)
    R = 0.0
    discounted = []
    for r in rewards[::-1]:
        R = r + gamma * R
        discounted.insert(0, R)
    if tensor:
        return th.cat(discounted).view(-1)
    return T(discounted)
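
A quick worked check (hypothetical numbers; assumes the surrounding module's T helper wraps a Python list into a tensor): with gamma = 0.5, the rewards [1, 1, 1] accumulate right-to-left as 1.0, then 1 + 0.5*1 = 1.5, then 1 + 0.5*1.5 = 1.75.

discount([1.0, 1.0, 1.0], 0.5)  # -> T([1.75, 1.5, 1.0])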
Project: drl.pth    Author: seba-1511
def generalized_advantage_estimations(rewards, values, terminal=None, gamma=0.99, tau=0.95):
    gae = 0.0
    advantages = []
    values = th.cat([values, V(T([0.0077]))])
    for i in reversed(range(len(rewards))):
        nonterminal = 1.0 - terminal[i]
        delta = rewards[i] + gamma * values[i+1] * nonterminal - values[i]
        gae = delta + gamma * tau * gae * nonterminal
        advantages.insert(0, gae + values[i])
    return th.cat(advantages)