Python torch module: stack() code examples

From open-source Python projects, we have extracted the following 50 code examples that illustrate how to use torch.stack().
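Before the project code, a minimal sketch of the basic semantics (checked against a recent PyTorch): stack() joins equal-shaped tensors along a new dimension, while cat() joins along an existing one.

import torch

a = torch.Tensor([1, 2, 3])
b = torch.Tensor([4, 5, 6])

s0 = torch.stack([a, b], 0)  # new leading dim: shape (2, 3)
s1 = torch.stack([a, b], 1)  # new second dim: shape (3, 2)
c = torch.cat([a, b], 0)     # existing dim: shape (6,)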

Project: pytorch-semantic-segmentation    Author: ZijunDeng
def forward(self, x):
        x_shape = x.size()  # (b, c, h, w)
        offset = self.offset_filter(x)  # (b, 2*c, h, w)
        offset_w, offset_h = torch.split(offset, self.regular_filter.in_channels, 1)  # (b, c, h, w)
        offset_w = offset_w.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]))  # (b*c, h, w)
        offset_h = offset_h.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]))  # (b*c, h, w)
        if not self.input_shape or self.input_shape != x_shape:
            self.input_shape = x_shape
            grid_w, grid_h = np.meshgrid(np.linspace(-1, 1, x_shape[3]), np.linspace(-1, 1, x_shape[2]))  # (h, w)
            grid_w = torch.Tensor(grid_w)
            grid_h = torch.Tensor(grid_h)
            if self.cuda:
                grid_w = grid_w.cuda()
                grid_h = grid_h.cuda()
            self.grid_w = nn.Parameter(grid_w)
            self.grid_h = nn.Parameter(grid_h)
        offset_w = offset_w + self.grid_w  # (b*c, h, w)
        offset_h = offset_h + self.grid_h  # (b*c, h, w)
        x = x.contiguous().view(-1, int(x_shape[2]), int(x_shape[3])).unsqueeze(1)  # (b*c, 1, h, w)
        x = F.grid_sample(x, torch.stack((offset_h, offset_w), 3))  # (b*c, 1, h, w)
        x = x.contiguous().view(-1, int(x_shape[1]), int(x_shape[2]), int(x_shape[3]))  # (b, c, h, w)
        x = self.regular_filter(x)
        return x
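The stack() call above is what assembles the two offset maps into the (N, H, W, 2) grid that F.grid_sample expects. A standalone sketch of that pattern, assuming a recent PyTorch (meshgrid's indexing argument and grid_sample's align_corners postdate the snippet); note that grid_sample reads the last dimension in (x, y) order:

import torch
import torch.nn.functional as F

n, c, h, w = 2, 3, 4, 5
x = torch.randn(n, c, h, w)

# identity sampling grid in [-1, 1]; last dim is (x, y), i.e. width first
grid_y, grid_x = torch.meshgrid(torch.linspace(-1, 1, h),
                                torch.linspace(-1, 1, w), indexing='ij')
grid = torch.stack((grid_x, grid_y), 2)           # (h, w, 2)
grid = grid.unsqueeze(0).expand(n, h, w, 2)       # (n, h, w, 2)

out = F.grid_sample(x, grid, align_corners=True)  # reproduces x up to eps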
Project: YellowFin_Pytorch    Author: JianGoForIt
def forward(self, x):
        outputs = []
        h_t = Variable(torch.zeros(x.size(0), self.hidden_size).cuda())
        c_t = Variable(torch.zeros(x.size(0), self.hidden_size).cuda())


        for i, input_t in enumerate(x.chunk(x.size(1), dim=1)):
            input_t = input_t.contiguous().view(input_t.size()[0], 1)
            h_t, c_t = self.lstm1(input_t, (h_t, c_t))
            outputs += [c_t]
        outputs = torch.stack(outputs, 1).squeeze(2)
        shp = (outputs.size()[0], outputs.size()[1])
        out = outputs.contiguous().view(shp[0] * shp[1], self.hidden_size)
        out = self.fc(out)
        out = out.view(shp[0], shp[1], self.num_classes)

        return out
Project: DeepLearning_PlantDiseases    Author: MarkoArsenovic
def Occlusion_exp(image, occluding_size, occluding_stride, model, preprocess, classes, groundTruth):
    img = np.copy(image)
    height, width, _ = img.shape
    output_height = int(math.ceil((height - occluding_size) / occluding_stride + 1))
    output_width = int(math.ceil((width - occluding_size) / occluding_stride + 1))
    occluded_images = []
    for h in range(output_height):
        for w in range(output_width):
            #occluder region
            h_start = h*occluding_stride
            w_start = w*occluding_stride
            h_end = min(height, h_start + occluding_size)
            w_end = min(width, w_start + occluding_size)

            input_image = copy.copy(img)
            input_image[h_start:h_end, w_start:w_end, :] = 0
            occluded_images.append(preprocess(Image.fromarray(input_image)))

    L = np.empty(output_height*output_width)
    L.fill(groundTruth)
    L = torch.from_numpy(L)
    tensor_images = torch.stack(occluded_images)
    dataset = torch.utils.data.TensorDataset(tensor_images,L) 
    dataloader = torch.utils.data.DataLoader(dataset,batch_size=5,shuffle=False, num_workers=8) 

    heatmap=np.empty(0)
    model.eval()
    for data in dataloader:
        images, labels = data

        if use_gpu:
            # `async` is a reserved word in Python 3.7+; newer PyTorch spells it non_blocking
            images, labels = images.cuda(), labels.cuda(async=True)

        outputs = model(Variable(images))
        m = nn.Softmax()
        outputs=m(outputs)
        if use_gpu:
            outs = outputs.cpu()
        else:
            outs = outputs  # without this branch, `outs` was undefined on CPU
        heatmap = np.concatenate((heatmap, outs[:, groundTruth].data.numpy()))

    return heatmap.reshape((output_height, output_width))
Project: ssd.pytorch    Author: amdegroot
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for sample in batch:
        imgs.append(sample[0])
        targets.append(torch.FloatTensor(sample[1]))
    return torch.stack(imgs, 0), targets
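A sketch of how a collate function like this is wired into a DataLoader; dataset here is a hypothetical dataset whose samples are (image_tensor, annotation_list) pairs:

loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True,
                                     collate_fn=detection_collate)
for images, targets in loader:
    # images: one (8, C, H, W) tensor; same-shaped images, so stack() works
    # targets: a list of 8 FloatTensors whose first dims (box counts) differ
    pass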
Project: allennlp    Author: allenai
def batch_tensors(cls, tensor_list: List[DataArray]) -> DataArray:  # type: ignore
        """
        Takes the output of ``Field.as_tensor()`` from a list of ``Instances`` and merges it into
        one batched tensor for this ``Field``.  The default implementation here in the base class
        handles cases where ``as_tensor`` returns a single torch tensor per instance, or a
        dictionary of single tensors.  If your subclass returns something other than this, you need
        to override this method.
        """
        if isinstance(tensor_list[0], dict):
            # This is creating a dict of {token_indexer_key: batch_tensor} for each
            # token indexer used to index this field. This is mostly utilised by TextFields.
            token_indexer_key_to_batch_dict: Dict[str, List[torch.Tensor]] = defaultdict(list)
            for encoding_name_dict in tensor_list:
                for indexer_name, tensor in encoding_name_dict.items():
                    token_indexer_key_to_batch_dict[indexer_name].append(tensor)
            return {indexer_name: torch.stack(tensor_list)
                    for indexer_name, tensor_list in token_indexer_key_to_batch_dict.items()}
        else:
            return torch.stack(tensor_list)
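To see why the dictionary branch exists, consider a hypothetical field indexed by two token indexers; stack() runs once per key instead of across the whole list:

tensor_list = [{'tokens': torch.LongTensor([1, 2]), 'chars': torch.LongTensor([3, 4])},
               {'tokens': torch.LongTensor([5, 6]), 'chars': torch.LongTensor([7, 8])}]
# batch_tensors(tensor_list) -> {'tokens': 2x2 tensor, 'chars': 2x2 tensor}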
Project: Pytorch-Sketch-RNN    Author: alexis-jacq
def make_batch(batch_size):
    batch_idx = np.random.choice(len(data),batch_size)
    batch_sequences = [data[idx] for idx in batch_idx]
    strokes = []
    lengths = []
    indice = 0
    for seq in batch_sequences:
        len_seq = len(seq[:,0])
        new_seq = np.zeros((Nmax,5))
        new_seq[:len_seq,:2] = seq[:,:2]
        new_seq[:len_seq-1,2] = 1-seq[:-1,2]
        new_seq[:len_seq,3] = seq[:,2]
        new_seq[(len_seq-1):,4] = 1
        new_seq[len_seq-1,2:4] = 0
        lengths.append(len(seq[:,0]))
        strokes.append(new_seq)
        indice += 1

    if use_cuda:
        batch = Variable(torch.from_numpy(np.stack(strokes,1)).cuda().float())
    else:
        batch = Variable(torch.from_numpy(np.stack(strokes,1)).float())
    return batch, lengths

################################ adaptive lr
Project: Pytorch-Sketch-RNN    Author: alexis-jacq
def make_target(self, batch, lengths):
        if use_cuda:
            eos = Variable(torch.stack([torch.Tensor([0,0,0,0,1])]\
                *batch.size()[1]).cuda()).unsqueeze(0)
        else:
            eos = Variable(torch.stack([torch.Tensor([0,0,0,0,1])]\
                *batch.size()[1])).unsqueeze(0)
        batch = torch.cat([batch, eos], 0)
        mask = torch.zeros(Nmax+1, batch.size()[1])
        for indice,length in enumerate(lengths):
            mask[:length,indice] = 1
        if use_cuda:
            mask = Variable(mask.cuda()).detach()
        else:
            mask = Variable(mask).detach()
        dx = torch.stack([Variable(batch.data[:,:,0])]*hp.M,2).detach()
        dy = torch.stack([Variable(batch.data[:,:,1])]*hp.M,2).detach()
        p1 = Variable(batch.data[:,:,2]).detach()
        p2 = Variable(batch.data[:,:,3]).detach()
        p3 = Variable(batch.data[:,:,4]).detach()
        p = torch.stack([p1,p2,p3],2)
        return mask,dx,dy,p
Project: multiNLI_encoder    Author: easonnie
def select_last(inputs, lengths, hidden_size):
    """
    :param inputs: [T * B * D] D = 2 * hidden_size
    :param lengths: [B]
    :param hidden_size: dimension 
    :return:  [B * D]
    """
    batch_size = inputs.size(1)
    batch_out_list = []
    for b in range(batch_size):
        batch_out_list.append(torch.cat((inputs[lengths[b] - 1, b, :hidden_size],
                                         inputs[0, b, hidden_size:])
                                        )
                              )

    out = torch.stack(batch_out_list)
    return out
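A quick shape check under assumed sizes: the function pairs the forward direction's last valid step with the backward direction's first step, then stacks over the batch:

T, B, hidden_size = 7, 2, 4
inputs = torch.randn(T, B, 2 * hidden_size)  # BiLSTM output, D = 2 * hidden_size
out = select_last(inputs, [5, 7], hidden_size)
print(out.size())  # (2, 8) = [B, D]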
Project: multiNLI_encoder    Author: easonnie
def max_along_time(inputs, lengths):
    """
    :param inputs: [T * B * D] 
    :param lengths:  [B]
    :return: [B * D] max_along_time
    """
    ls = list(lengths)

    b_seq_max_list = []
    for i, l in enumerate(ls):
        seq_i = inputs[:l, i, :]
        seq_i_max, _ = seq_i.max(dim=0)
        seq_i_max = seq_i_max.squeeze()
        b_seq_max_list.append(seq_i_max)

    return torch.stack(b_seq_max_list)
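And the analogous check for max_along_time(), which max-pools each sequence over its valid timesteps only:

T, B, D = 6, 3, 4
inputs = torch.randn(T, B, D)
pooled = max_along_time(inputs, [4, 6, 2])
print(pooled.size())  # (3, 4) = [B, D]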
Project: pyro    Author: uber
def _dist_and_values(self, *args, **kwargs):
        # XXX currently this whole object is very inefficient
        values, logits = [], []
        for value, logit in self._gen_weighted_samples(*args, **kwargs):
            ix = _index(values, value)
            if ix == -1:
                # Value is new.
                values.append(value)
                logits.append(logit)
            else:
                # Value has already been seen.
                logits[ix] = util.log_sum_exp(torch.stack([logits[ix], logit]).squeeze())

        logits = torch.stack(logits).squeeze()
        if not isinstance(logits, torch.autograd.Variable):
            logits = Variable(logits)
        # normalize once so the categorical weights sum to one
        logits = logits - util.log_sum_exp(logits)

        d = dist.Categorical(logits=logits, one_hot=False)
        return d, values
Project: pyro    Author: uber
def enumerate_support(self):
        """
        Returns the Bernoulli distribution's support, as a tensor along the first dimension.

        Note that this returns support values of all the batched RVs in lock-step, rather
        than the full cartesian product. To iterate over the cartesian product, you must
        construct univariate Bernoullis and use itertools.product() over all univariate
        variables (may be expensive).

        :return: torch variable enumerating the support of the Bernoulli distribution.
            Each item in the return value, when enumerated along the first dimension, yields a
            value from the distribution's support with the same dimensions as a call to
            sample() would return.
        :rtype: torch.autograd.Variable.
        """
        return Variable(torch.stack([torch.Tensor([t]).expand_as(self.ps) for t in [0, 1]]))
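A worked example (not from the project) for ps of shape (3,):

ps = torch.Tensor([0.2, 0.7, 0.5])  # three batched Bernoullis
support = torch.stack([torch.Tensor([t]).expand_as(ps) for t in [0, 1]])
# support = [[0., 0., 0.],
#            [1., 1., 1.]]  shape (2, 3): one lock-step row per support value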
Project: sru    Author: taolei87
def forward(self, input, c0=None, return_hidden=True):
        assert input.dim() == 3 # (len, batch, n_in)
        dir_ = 2 if self.bidirectional else 1
        if c0 is None:
            zeros = Variable(input.data.new(
                input.size(1), self.n_out*dir_
            ).zero_())
            c0 = [ zeros for i in range(self.depth) ]
        else:
            assert c0.dim() == 3    # (depth, batch, n_out*dir_)
            c0 = [ x.squeeze(0) for x in c0.chunk(self.depth, 0) ]

        prevx = input
        lstc = []
        for i, rnn in enumerate(self.rnn_lst):
            h, c = rnn(prevx, c0[i])
            prevx = h
            lstc.append(c)

        if return_hidden:
            return prevx, torch.stack(lstc)
        else:
            return prevx
Project: bandit-nmt    Author: khanhptnk
def translate(self, inputs, max_length):
        targets, init_states = self.initialize(inputs, eval=True)
        emb, output, hidden, context = init_states

        preds = [] 
        batch_size = targets.size(1)
        num_eos = targets[0].data.byte().new(batch_size).zero_()

        for i in range(max_length):
            output, hidden = self.decoder.step(emb, output, hidden, context)
            logit = self.generator(output)
            pred = logit.max(1)[1].view(-1).data
            preds.append(pred)

            # Stop if all sentences reach EOS.
            num_eos |= (pred == lib.Constants.EOS)
            if num_eos.sum() == batch_size: break

            emb = self.decoder.word_lut(Variable(pred))

        preds = torch.stack(preds)
        return preds
Project: bandit-nmt    Author: khanhptnk
def sample(self, inputs, max_length):
        targets, init_states = self.initialize(inputs, eval=False)
        emb, output, hidden, context = init_states

        outputs = []
        samples = []
        batch_size = targets.size(1)
        num_eos = targets[0].data.byte().new(batch_size).zero_()

        for i in range(max_length):
            output, hidden = self.decoder.step(emb, output, hidden, context)
            outputs.append(output)
            dist = F.softmax(self.generator(output))
            sample = dist.multinomial(1, replacement=False).view(-1).data
            samples.append(sample)

            # Stop if all sentences reach EOS.
            num_eos |= (sample == lib.Constants.EOS)
            if num_eos.sum() == batch_size: break

            emb = self.decoder.word_lut(Variable(sample))

        outputs = torch.stack(outputs)
        samples = torch.stack(samples)
        return samples, outputs
Project: bandit-nmt    Author: khanhptnk
def __getitem__(self, index):
        assert index < self.numBatches, "%d > %d" % (index, self.numBatches)
        srcBatch, lengths = self._batchify(self.src[index*self.batchSize:(index+1)*self.batchSize],
            include_lengths=True)

        tgtBatch = self._batchify(self.tgt[index*self.batchSize:(index+1)*self.batchSize])

        # within batch sort by decreasing length.
        indices = range(len(srcBatch))
        batch = zip(indices, srcBatch, tgtBatch)
        batch, lengths = zip(*sorted(zip(batch, lengths), key=lambda x: -x[1]))
        indices, srcBatch, tgtBatch = zip(*batch)

        def wrap(b):
            b = torch.stack(b, 0).t().contiguous()
            if self.cuda:
                b = b.cuda()
            b = Variable(b, volatile=self.eval)
            return b

        return (wrap(srcBatch), lengths), wrap(tgtBatch), indices
Project: textobjdetection    Author: andfoy
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for sample in batch:
        imgs.append(sample[0])
        targets.append(torch.FloatTensor(sample[1]))
    return torch.stack(imgs, 0), targets
Project: arc-pytorch    Author: sanyam5
def fetch_batch(self, part, batch_size: int = None):

        if batch_size is None:
            batch_size = self.batch_size

        X, Y = self._fetch_batch(part, batch_size)

        X = Variable(torch.from_numpy(X)).view(2*batch_size, self.image_size, self.image_size)

        X1 = X[:batch_size]  # (B, h, w)
        X2 = X[batch_size:]  # (B, h, w)

        X = torch.stack([X1, X2], dim=1)  # (B, 2, h, w)

        Y = Variable(torch.from_numpy(Y))

        if use_cuda:
            X, Y = X.cuda(), Y.cuda()

        return X, Y
Project: realtime-action-detection    Author: gurkirt
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).
    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations
    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """

    targets = []
    imgs = []
    image_ids = []
    for sample in batch:
        imgs.append(sample[0])
        targets.append(torch.FloatTensor(sample[1]))
        image_ids.append(sample[2])
    return torch.stack(imgs, 0), targets, image_ids
Project: FewShotLearning    Author: gitabcworld
def _forward_rnn(cell, input_, grads_, length, hx):
        max_time = input_.size(0)
        output = []
        for time in range(max_time):
            hx = cell(input_=input_[time],grads_=grads_[time], hx=hx)
            #mask = (time < length).float().unsqueeze(1).expand_as(h_next[0])
            #fS_next = h_next[0] * mask + hx[0] * (1 - mask)
            #iS_next = h_next[1] * mask + hx[1] * (1 - mask)
            #cS_next = h_next[2] * mask + hx[2] * (1 - mask)
            #deltaS_next = h_next[3] * mask + hx[3] * (1 - mask)
            #hx_next = (fS_next, iS_next, cS_next, deltaS_next)
            #output.append(h_next)
            #hx = hx_next
        #output = torch.stack(output, 0)
        #return output,hx
        #return hx[2],hx
        return hx
Project: FewShotLearning    Author: gitabcworld
def _forward_rnn(cell, input_, length, hx):
        max_time = input_.size(0)
        output = []
        for time in range(max_time):
            if isinstance(cell, BNLSTMCell):
                h_next, c_next = cell(input_=input_[time], hx=hx, time=time)
            else:
                h_next, c_next = cell(input_=input_[time], hx=hx)
            mask = (time < length).float().unsqueeze(1).expand_as(h_next)
            h_next = h_next*mask + hx[0]*(1 - mask)
            c_next = c_next*mask + hx[1]*(1 - mask)
            hx_next = (h_next, c_next)
            output.append(h_next)
            hx = hx_next
        output = torch.stack(output, 0)
        return output, hx
Project: FewShotLearning    Author: gitabcworld
def forward(self, input_, length=None, hx=None):
        if self.batch_first:
            input_ = input_.transpose(0, 1)
        max_time, batch_size, _ = input_.size()
        if length is None:
            length = Variable(torch.LongTensor([max_time] * batch_size))
            if input_.is_cuda:
                length = length.cuda()
        if hx is None:
            hx = Variable(input_.data.new(batch_size, self.hidden_size).zero_())
            hx = (hx, hx)
        h_n = []
        c_n = []
        layer_output = None
        for layer in range(self.num_layers):
            layer_output, (layer_h_n, layer_c_n) = LSTM._forward_rnn(
                cell=self.cells[layer], input_=input_, length=length, hx=hx)
            input_ = self.dropout_layer(layer_output)
            h_n.append(layer_h_n)
            c_n.append(layer_c_n)
        output = layer_output
        h_n = torch.stack(h_n, 0)
        c_n = torch.stack(c_n, 0)
        return output, (h_n, c_n)
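Stacking the per-layer finals on dim 0 reproduces the (num_layers, batch, hidden) layout that the built-in nn.LSTM returns; a sketch under assumed sizes:

h_n = [torch.randn(4, 16) for _ in range(3)]  # 3 layers, batch 4, hidden 16
h_n = torch.stack(h_n, 0)                     # (3, 4, 16)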
Project: biaffineparser    Author: chantera
def forward(self, pretrained_word_tokens, word_tokens, pos_tokens):
        lengths = np.array([len(tokens) for tokens in word_tokens])
        X = self.forward_embed(
            pretrained_word_tokens, word_tokens, pos_tokens, lengths)
        indices = np.argsort(-np.array(lengths)).astype(np.int64)
        lengths = lengths[indices]
        X = torch.stack([X[idx] for idx in indices])
        X = nn.utils.rnn.pack_padded_sequence(X, lengths, batch_first=True)
        R = self.blstm(X)[0]
        R = nn.utils.rnn.pad_packed_sequence(R, batch_first=True)[0]
        R = R.index_select(dim=0, index=_model_var(
            self, torch.from_numpy(np.argsort(indices).astype(np.int64))))
        H_arc_head = self.mlp_arc_head(R)
        H_arc_dep = self.mlp_arc_dep(R)
        arc_logits = self.arc_biaffine(H_arc_dep, H_arc_head)
        arc_logits = torch.squeeze(arc_logits, dim=3)
        H_label_dep = self.mlp_label_dep(R)
        H_label_head = self.mlp_label_head(R)
        label_logits = self.label_biaffine(H_label_dep, H_label_head)
        return arc_logits, label_logits
Project: R-net    Author: matthew-z
def forward(self, inputs, hidden):
        def select_layer(h_state, i):  # To work on both LSTM / GRU, RNN
            if isinstance(h_state, tuple):
                return tuple([select_layer(s, i) for s in h_state])
            else:
                return h_state[i]

        next_hidden = []
        for i, layer in enumerate(self.layers):
            hidden_i = select_layer(hidden, i)
            next_hidden_i = layer(inputs, hidden_i)
            output = next_hidden_i[0] if isinstance(next_hidden_i, tuple) \
                else next_hidden_i
            if i + 1 != self.num_layers:
                output = self.dropout(output)
            if i > 0 and self.residual:
                inputs = output + inputs
            else:
                inputs = output
            next_hidden.append(next_hidden_i)
        if isinstance(hidden, tuple):
            next_hidden = tuple([torch.stack(h) for h in zip(*next_hidden)])
        else:
            next_hidden = torch.stack(next_hidden)
        return inputs, next_hidden
Project: R-net    Author: matthew-z
def eval(self):
        self.model.eval()
        pred_result = {}
        for _, batch in enumerate(self.dataloader_dev):

            question_ids, questions, passages, passage_tokenized = batch
            questions.variable(volatile=True)
            passages.variable(volatile=True)
            begin_, end_ = self.model(questions, passages)  # batch x seq

            _, pred_begin = torch.max(begin_, 1)
            _, pred_end = torch.max(end_, 1)

            pred = torch.stack([pred_begin, pred_end], dim=1)

            for i, (begin, end) in enumerate(pred.cpu().data.numpy()):
                ans = passage_tokenized[i][begin:end + 1]
                qid = question_ids[i]
                pred_result[qid] = " ".join(ans)
        self.model.train()
        return evaluate(self.dev_dataset, pred_result)
Project: MSDN    Author: yikang-li
def prepare_message(self, target_features, source_features, select_mat, gate_module):
        feature_data = []

        transfer_list = np.where(select_mat > 0)
        source_indices = Variable(torch.from_numpy(transfer_list[1]).type(torch.LongTensor)).cuda()
        target_indices = Variable(torch.from_numpy(transfer_list[0]).type(torch.LongTensor)).cuda()
        source_f = torch.index_select(source_features, 0, source_indices)
        target_f = torch.index_select(target_features, 0, target_indices)
        transferred_features = gate_module(target_f, source_f)

        for f_id in range(target_features.size()[0]):
            if len(np.where(select_mat[f_id, :] > 0)[0]) > 0:
                feature_indices = np.where(transfer_list[0] == f_id)[0]
                indices = Variable(torch.from_numpy(feature_indices).type(torch.LongTensor)).cuda()
                features = torch.index_select(transferred_features, 0, indices).mean(0).view(-1)
                feature_data.append(features)
            else:
                temp = Variable(torch.zeros(target_features.size()[1:]), requires_grad=True).type(torch.FloatTensor).cuda()
                feature_data.append(temp)
        return torch.stack(feature_data, 0)
Project: MSDN    Author: yikang-li
def baseline_search(self, input, beam_size=None):
        # This is the simple greedy search
        batch_size = input.size(0)
        hidden_feat = self.lstm_im(input.view(1, input.size()[0], input.size()[1]))[1]
        x = Variable(torch.ones(1, batch_size,).type(torch.LongTensor) * self.start, requires_grad=False).cuda() # <start>
        output = []
        flag = torch.ones(batch_size)
        for i in range(self.nseq):
            input_x = self.encoder(x.view(1, -1))
            output_feature, hidden_feat = self.lstm_word(input_x, hidden_feat)
            output_t = self.decoder(output_feature.view(-1, output_feature.size(2)))
            output_t = F.log_softmax(output_t)
            logprob, x = output_t.max(1)
            output.append(x)
            flag[x.cpu().eq(self.end).data] = 0
            if flag.sum() == 0:
                break
        output = torch.stack(output, 0).squeeze().transpose(0, 1).cpu().data
        return output
Project: NeuralMT    Author: hlt-mt
def getHyp(self, k):
        """
        Walk back to construct the full hypothesis.

        Parameters:

             * `k` - the position in the beam to construct.

        Returns:

            1. The hypothesis.
            2. The attention at each time step.
        """
        hyp, attn = [], []
        for j in range(len(self.prevKs) - 1, -1, -1):
            hyp.append(self.nextYs[j+1][k])
            attn.append(self.attn[j][k])
            k = self.prevKs[j][k]

        return hyp[::-1], torch.stack(attn[::-1])
Project: alpha-dimt-icmlws    Author: sotetsuk
def forward(self, input, hidden, context, init_output):
        emb = self.word_lut(input)

        # n.b. you can increase performance if you compute W_ih * x for all
        # iterations in parallel, but that's only possible if
        # self.input_feed=False
        outputs = []
        output = init_output
        for emb_t in emb.split(1):
            emb_t = emb_t.squeeze(0)
            if self.input_feed:
                emb_t = torch.cat([emb_t, output], 1)

            output, hidden = self.rnn(emb_t, hidden)
            output, attn = self.attn(output, context.t())
            output = self.dropout(output)
            outputs += [output]

        outputs = torch.stack(outputs)
        return outputs, hidden, attn
Project: pytorch    Author: tylergenter
def safeCoalesce(self, t):
        tc = t.coalesce()

        value_map = {}
        for idx, val in zip(t._indices().t(), t._values()):
            idx_tup = tuple(idx)
            if idx_tup in value_map:
                value_map[idx_tup] += val
            else:
                value_map[idx_tup] = val.clone() if torch.is_tensor(val) else val

        new_indices = sorted(list(value_map.keys()))
        new_values = [value_map[idx] for idx in new_indices]
        if t._values().ndimension() < 2:
            new_values = t._values().new(new_values)
        else:
            new_values = torch.stack(new_values)

        new_indices = t._indices().new(new_indices).t()
        tg = t.new(new_indices, new_values, t.size())

        self.assertEqual(tc._indices(), tg._indices())
        self.assertEqual(tc._values(), tg._values())

        return tg
Project: benchmark    Author: pytorch
def _forward_rnn(cell, input_, length, hx):
        max_time = input_.size(0)
        output = []
        for time in range(max_time):
            if isinstance(cell, BNLSTMCell):
                h_next, c_next = cell(input_=input_[time], hx=hx, time=time)
            else:
                h_next, c_next = cell(input_=input_[time], hx=hx)
            mask = (time < length).float().unsqueeze(1).expand_as(h_next)
            h_next = h_next*mask + hx[0]*(1 - mask)
            c_next = c_next*mask + hx[1]*(1 - mask)
            hx_next = (h_next, c_next)
            output.append(h_next)
            hx = hx_next
        output = torch.stack(output, 0)
        return output, hx
Project: benchmark    Author: pytorch
def forward(self, input, c0=None, return_hidden=True):
        assert input.dim() == 3 # (len, batch, n_in)
        dir_ = 2 if self.bidirectional else 1
        if c0 is None:
            zeros = Variable(input.data.new(
                input.size(1), self.n_out*dir_
            ).zero_())
            c0 = [ zeros for i in range(self.depth) ]
        else:
            assert c0.dim() == 3    # (depth, batch, n_out*dir_)
            c0 = [ x.squeeze(0) for x in c0.chunk(self.depth, 0) ]

        prevx = input
        lstc = []
        for i, rnn in enumerate(self.rnn_lst):
            h, c = rnn(prevx, c0[i])
            prevx = h
            lstc.append(c)

        if return_hidden:
            return prevx, torch.stack(lstc)
        else:
            return prevx
Project: sk-torch    Author: mattHawthorn
def fit(self, X: Iterable[T1], y: Iterable[T2],
            X_test: Opt[Iterable[T1]]=None, y_test: Opt[Iterable[T2]]=None,
            batch_size: Opt[int]=None, shuffle: bool=False,
            max_epochs: int=1, min_epochs: int=1, criterion_window: int=5,
            max_training_time: Opt[float]=None,
            batch_report_interval: Opt[int]=None, epoch_report_interval: Opt[int]=None):
        """This method fits the *entire* pipeline, including input normalization. Initialization of weight/bias
        parameters in the torch_module is up to you; there is no obvious canonical way to do it here.
        Returns per-epoch losses and validation losses (if any)."""
        batch_size = batch_size or self.default_batch_size
        if self.should_normalize:
            sample, X = peek(X, self.norm_n_samples)
            if self.encode_input:
                sample = [self.encode_input(x) for x in sample]
            sample = stack(sample)
            self.estimate_normalization(sample)

        return self.update(X=X, y=y, X_test=X_test, y_test=y_test, batch_size=batch_size, shuffle=shuffle,
                           max_epochs=max_epochs, min_epochs=min_epochs,
                           criterion_window=criterion_window,
                           max_training_time=max_training_time,
                           batch_report_interval=batch_report_interval, epoch_report_interval=epoch_report_interval)
Project: sk-torch    Author: mattHawthorn
def fit_zipped(self, dataset: Iterable[Tuple[T1, T2]], test_dataset: Opt[Iterable[Tuple[T1, T2]]]=None,
                   batch_size: Opt[int] = None,
                   max_epochs: int = 1, min_epochs: int = 1, criterion_window: int = 5,
                   max_training_time: Opt[float] = None,
                   batch_report_interval: Opt[int] = None, epoch_report_interval: Opt[int] = None):
        """For fitting to an iterable sequence of pairs, such as may arise in very large streaming datasets from sources
        that don't fit the random access and known-length requirements of a torch.data.Dataset (e.g. a sequence of
        sentences split from a set of text files as might arise in NLP applications.
        Like TorchModel.fit(), this estimates input normalization before the weight update, and weight initialization of
        the torch_module is up to you. Returns per-epoch losses and validation losses (if any).
        This method handles packaging X and y into a batch iterator of the kind that torch modules expect."""
        batch_size = batch_size or self.default_batch_size
        if self.should_normalize:
            sample, dataset = peek(dataset, self.norm_n_samples)
            sample = [t[0] for t in sample]
            if self.encode_input:
                sample = [self.encode_input(x) for x in sample]
            sample = stack(sample)
            self.estimate_normalization(sample)

        return self.update_zipped(dataset=dataset, test_dataset=test_dataset, batch_size=batch_size,
                                  max_epochs=max_epochs, min_epochs=min_epochs,
                                  criterion_window=criterion_window,
                                  max_training_time=max_training_time,
                                  batch_report_interval=batch_report_interval, epoch_report_interval=epoch_report_interval)
Project: dong_iccv_2017    Author: woozzu
def _get_word_vectors(self, desc, word_embedding):
        output = []
        len_desc = []
        for i in range(desc.shape[1]):
            words = self._nums2chars(desc[:, i])
            words = split_sentence_into_words(words)
            word_vecs = torch.Tensor([word_embedding[w] for w in words])
            # zero padding
            if len(words) < self.max_word_length:
                word_vecs = torch.cat((
                    word_vecs,
                    torch.zeros(self.max_word_length - len(words), word_vecs.size(1))
                ))
            output.append(word_vecs)
            len_desc.append(len(words))
        return torch.stack(output), len_desc
Project: ssd_pytorch    Author: miraclebiu
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for _, sample in enumerate(batch):
        for _, tup in enumerate(sample):
            #pdb.set_trace()
            if torch.is_tensor(tup):
                imgs.append(tup)
            elif isinstance(tup, list):
                annos = [torch.Tensor(a) for a in tup]
                #pdb.set_trace()
                targets.append(torch.stack(annos, 0))

    return (torch.stack(imgs, 0), targets)
Project: Seq2Seq-PyTorch    Author: MaximumEntropy
def forward(self, input, hidden, ctx, ctx_mask=None):
        """Propogate input through the layer."""
        h_0, c_0 = hidden
        h_1, c_1 = [], []
        for i, layer in enumerate(self.layers):
            if ctx_mask is not None:
                ctx_mask = torch.ByteTensor(
                    ctx_mask.data.cpu().numpy().astype(np.int32).tolist()
                ).cuda()
            output, (h_1_i, c_1_i) = layer(input, (h_0, c_0), ctx, ctx_mask)

            input = output

            if i + 1 != len(self.layers):  # skip dropout after the last layer
                input = self.dropout(input)

            h_1 += [h_1_i]
            c_1 += [c_1_i]

        h_1 = torch.stack(h_1)
        c_1 = torch.stack(c_1)

        return input, (h_1, c_1)
Project: yolov2    Author: zhangkaij
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a different
    number of associated object annotations (bounding boxes).

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for sample in batch:
        imgs.append(sample[0])
        targets.append(torch.FloatTensor(sample[1]))
    return torch.stack(imgs, 0), targets
Project: MatchingNetworks    Author: gitabcworld
def forward(self, support_set, input_image):

        """
        Produces pdfs over the support set classes for the target set image.
        :param support_set: The embeddings of the support set images, tensor of shape [sequence_length, batch_size, 64]
        :param input_image: The embedding of the target image, tensor of shape [batch_size, 64]
        :return: Softmax pdf. Tensor with cosine similarities of shape [batch_size, sequence_length]
        """
        eps = 1e-10
        similarities = []
        for support_image in support_set:
            sum_support = torch.sum(torch.pow(support_image, 2), 1)
            support_magnitude = sum_support.clamp(eps, float("inf")).rsqrt()
            dot_product = input_image.unsqueeze(1).bmm(support_image.unsqueeze(2)).squeeze()
            cosine_similarity = dot_product * support_magnitude
            similarities.append(cosine_similarity)
        similarities = torch.stack(similarities)
        return similarities
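Downstream, the stacked similarities are typically transposed and softmaxed to obtain the per-target pdf the docstring promises; a minimal sketch (the dim argument to softmax assumes a newer PyTorch than the snippet targets):

sims = torch.stack([torch.randn(8) for _ in range(5)])  # (sequence_length=5, batch=8)
probs = F.softmax(sims.t(), dim=1)                      # (batch, sequence_length), rows sum to 1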
Project: seq2seq.pytorch    Author: eladhoffer
def forward(self, inputs, hidden):
        def select_layer(h_state, i):  # To work on both LSTM / GRU, RNN
            if isinstance(h_state, tuple):
                return tuple([select_layer(s, i) for s in h_state])
            else:
                return h_state[i]

        next_hidden = []
        for i, layer in enumerate(self.layers):
            next_hidden_i = layer(inputs, select_layer(hidden, i))
            output = next_hidden_i[0] if isinstance(next_hidden_i, tuple) \
                else next_hidden_i
            if i + 1 != self.num_layers:
                output = self.dropout(output)
            if self.residual:
                inputs = output + inputs
            else:
                inputs = output
            next_hidden.append(next_hidden_i)
        if isinstance(hidden, tuple):
            next_hidden = tuple([torch.stack(h) for h in zip(*next_hidden)])
        else:
            next_hidden = torch.stack(next_hidden)
        return inputs, next_hidden
Project: pytorch-seq2seq    Author: rowanz
def _sample(self, state, context, mask, max_len=20):
        """
        Performs sampling
        """
        batch_size = state.size(0)

        toks = [const_row(self.bos_token, batch_size, volatile=True)]

        lens = torch.IntTensor(batch_size)
        if torch.cuda.is_available():
            lens = lens.cuda()

        for l in range(max_len + 1):  # +1 because of EOS
            out, state, alpha = self._lstm_loop(state, self.embedding(toks[-1]), context, mask)

            # Do argmax (since we're doing greedy decoding)
            toks.append(out.max(1)[1].squeeze(1))

            lens[(toks[-1].data == self.eos_token) & (lens == 0)] = l+1
            if all(lens):
                break
        lens[lens == 0] = max_len+1
        return torch.stack(toks, 0), lens
Project: Text-Summarization    Author: hashbangCoder
def evalPreproc(self, sample):   
        # sample length = 1
        # limit max article size to 400 tokens     
        extIntArticles, intRevArticles = [], []
        max_article_oov = 0        
        article = sample['article'].split(' ')                  
        # get article  int-tokenized
        _intArticle, _extIntArticle, article_oov, _ = self.makeEncoderInput(article)
        if max_article_oov < len(article_oov):
            max_article_oov = len(article_oov)
        _intRevArticle = list(reversed(_intArticle))
        # _intAbstract, _extIntAbstract, abs_len = self.makeDecoderInput(abstract, article_oov)

        extIntArticles.append(_extIntArticle)            
        intRevArticles.append(_intRevArticle)

        padExtArticles = [torch.LongTensor(item) for item in extIntArticles]        
        padRevArticles = [torch.LongTensor(item) for item in intRevArticles]                

        batchExtArticles = torch.stack(padExtArticles, 0)
        # replace temp ids with unk token id for enc input
        batchArticles = batchExtArticles.clone().masked_fill_((batchExtArticles > self.vocabSize), self.word2id['<unk>'])
        batchRevArticles = torch.stack(padRevArticles, 0)

        return batchArticles, batchRevArticles, batchExtArticles, max_article_oov, article_oov, sample['article'], sample['abstract']
Project: Text-Summarization    Author: hashbangCoder
def getInputTextSample(self, tokenized_text):
        extIntArticles, intRevArticles = [], []
        max_article_oov = 0        
        # get article  int-tokenized
        _intArticle, _extIntArticle, article_oov, _ = self.makeEncoderInput(tokenized_text)
        if max_article_oov < len(article_oov):
            max_article_oov = len(article_oov)
        _intRevArticle = list(reversed(_intArticle))

        extIntArticles.append(_extIntArticle)            
        intRevArticles.append(_intRevArticle)

        padExtArticles = [torch.LongTensor(item) for item in extIntArticles]        
        padRevArticles = [torch.LongTensor(item) for item in intRevArticles]                

        batchExtArticles = torch.stack(padExtArticles, 0)
        # replace temp ids with unk token id for enc input
        batchArticles = batchExtArticles.clone().masked_fill_((batchExtArticles > self.vocabSize), self.word2id['<unk>'])
        batchRevArticles = torch.stack(padRevArticles, 0)

        return batchArticles, batchRevArticles, batchExtArticles, max_article_oov, article_oov
Project: qpth    Author: locuslab
def solve_kkt(Ks, K, Ktildes, Ktilde,
              rx, rs, rz, ry, niter=1):
    nBatch = len(Ks)
    nz = rx.size(1)
    nineq = rz.size(1)
    neq = ry.size(1)

    r = -torch.cat((rx, rs, rz, ry), 1)

    l = torch.spbqrfactsolve(*([r] + Ktilde))
    res = torch.stack([r[i] - torch.mm(Ks[i], l[i].unsqueeze(1))
                       for i in range(nBatch)])
    for k in range(niter):
        d = torch.spbqrfactsolve(*([res] + Ktilde))
        l = l + d
        res = torch.stack([r[i] - torch.mm(Ks[i], l[i].unsqueeze(1))
                           for i in range(nBatch)])

    solx = l[:, :nz]
    sols = l[:, nz:nz + nineq]
    solz = l[:, nz + nineq:nz + 2 * nineq]
    soly = l[:, nz + 2 * nineq:nz + 2 * nineq + neq]

    return solx, sols, solz, soly
Project: loop    Author: facebookresearch
def __init__(self, src, trgt, spkr, seq_len):
        self.seq_len = seq_len
        self.start = True

        self.speakers = spkr
        self.srcBatch = src[0]
        self.srcLenths = src[1]

        # split batch
        self.tgtBatch = list(torch.split(trgt[0], self.seq_len, 0))
        self.tgtBatch.reverse()
        self.len = len(self.tgtBatch)

        # split length list
        batch_seq_len = len(self.tgtBatch)
        self.tgtLenths = [self.split_length(l, batch_seq_len) for l in trgt[1]]
        self.tgtLenths = torch.stack(self.tgtLenths)
        self.tgtLenths = list(torch.split(self.tgtLenths, 1, 1))
        self.tgtLenths = [x.squeeze() for x in self.tgtLenths]
        self.tgtLenths.reverse()

        assert len(self.tgtLenths) == len(self.tgtBatch)
Project: self-critical.pytorch    Author: ruotianluo
def forward(self, xt, fc_feats, att_feats, p_att_feats, state):
        prev_h = state[0][-1]
        att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)

        h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))

        att = self.attention(h_att, att_feats, p_att_feats)

        lang_lstm_input = torch.cat([att, h_att], 1)
        # lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????

        h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))

        output = F.dropout(h_lang, self.drop_prob_lm, self.training)
        state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))

        return output, state
Project: examples    Author: pytorch
def forward(self, input, future = 0):
        outputs = []
        h_t = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)
        c_t = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)
        h_t2 = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)
        c_t2 = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)

        for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
            h_t, c_t = self.lstm1(input_t, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        for i in range(future):# if we should predict the future
            h_t, c_t = self.lstm1(output, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        outputs = torch.stack(outputs, 1).squeeze(2)
        return outputs
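The closing stack/squeeze idiom is worth isolating: a list of per-step (batch, 1) outputs becomes one (batch, steps) tensor:

outputs = [torch.randn(8, 1) for _ in range(10)]  # 10 steps, batch 8
seq = torch.stack(outputs, 1).squeeze(2)          # (8, 10, 1) -> (8, 10)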
Project: faster-rcnn.pytorch    Author: jwyang
def bbox_transform(ex_rois, gt_rois):
    ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
    ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights

    gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
    gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights

    targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
    targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
    targets_dw = torch.log(gt_widths / ex_widths)
    targets_dh = torch.log(gt_heights / ex_heights)

    targets = torch.stack(
        (targets_dx, targets_dy, targets_dw, targets_dh),1)

    return targets
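A worked example with one anchor and one ground-truth box (values hand-checked, not from the project):

ex_rois = torch.Tensor([[0., 0., 9., 9.]])    # 10x10 box at the origin
gt_rois = torch.Tensor([[2., 2., 11., 11.]])  # same size, shifted by +2
deltas = bbox_transform(ex_rois, gt_rois)
# tensor([[0.2000, 0.2000, 0.0000, 0.0000]]): (dx, dy, log dw, log dh) on dim 1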
Project: URNN-PyTorch    Author: jingli9111
def forward(self, input_, length=None, hx=None):
        if self.batch_first:
            input_ = input_.transpose(0, 1)
        max_time, batch_size, _ = input_.size()
        if length is None:
            length = Variable(torch.LongTensor([max_time] * batch_size))
            if input_.is_cuda:
                length = length.cuda()
        if hx is None:
            hx = Variable(input_.data.new(batch_size, self.hidden_size).zero_())

        h_n = []
        layer_output = None
        for layer in range(self.num_layer):
            layer_output, layer_h_n = GORU._forward_rnn(
                cell=self.cells[layer], input_=input_, length=length, hx=hx)
            input_ = self.dropout_layer(layer_output)
            h_n.append(layer_h_n)
        output = layer_output
        h_n = torch.stack(h_n, 0)
        return output, h_n
Project: seqmod    Author: emanjavacas
def argmax(self, z, max_len):
        # local variables
        eos, bos = self.src_dict.get_eos(), self.src_dict.get_bos()
        batch = z.size(0)
        # output variables
        scores, preds, mask = 0, [], z.data.new(batch).long() + 1
        # model inputs
        hidden = self.decoder.init_hidden_for(z)
        prev = Variable(z.data.new(batch).zero_().long() + bos, volatile=True)

        for _ in range(max_len):
            prev_emb = self.embeddings(prev).squeeze(0)
            dec_out, hidden = self.decoder(prev_emb, hidden, z=z)
            dec_out = self.project(dec_out.unsqueeze(0))

            score, pred = dec_out.max(1)
            scores += score.squeeze().data
            preds.append(pred.squeeze().data)
            prev = pred

            mask = mask * (pred.squeeze().data[0] != eos)
            if mask.int().sum() == 0:
                break

        return scores.tolist(), torch.stack(preds).transpose(0, 1).tolist()
Project: seqmod    Author: emanjavacas
def forward(self, outs, emb):
        """
        Runs attention for a given input sequence

        Returns: output, weights
        --------
        output: torch.Tensor (seq_len x batch_size x hid_dim)
        weights: list of torch.Tensor(batch_size x 0:t-1) of length seq_len
        """
        emb_att = self.attn.project_emb(emb)
        output, weights = [], []
        for idx, hid in enumerate(outs):
            t = max(0, idx-1)  # use same hid at t=0
            context, weight = self.attn(
                outs[t], emb[:max(1, t)], emb_att=emb_att[:max(1, t)])
            output.append(self.hid2hid(hid) + self.emb2hid(context))
            weights.append(weight)
        return torch.stack(output), weights