Python torch module: chunk() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use torch.chunk().
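
As a quick primer before the project snippets: torch.chunk(input, chunks, dim=0) splits a tensor into the requested number of chunks along one dimension and returns them as a tuple of views; if the dimension is not evenly divisible, the trailing chunk is smaller. The snippets below mostly date from Variable-era PyTorch, but torch.chunk itself behaves the same today. A minimal sketch:

import torch

x = torch.arange(12).reshape(3, 4)

# Two chunks along dim=1: each view has shape (3, 2).
left, right = torch.chunk(x, 2, dim=1)

# An indivisible size leaves a smaller final chunk: sizes 2, 2, 1.
parts = torch.chunk(torch.arange(5), 3)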

Project: DistanceGAN    Author: sagiebenaim
def get_std(self, num_items, vars, expectation):

        num_pairs = 0
        std_sum = 0.0

        # If using self-distance, compute the std over the top and bottom halves
        if self.use_self_distance:
            for i in range(num_items):
                var_half_1, var_half_2 = torch.chunk(vars[i], 2, dim=2)
                std_sum += np.square(self.as_np(self.distance(var_half_1, var_half_2)) - expectation)
            return np.sqrt(std_sum / num_items)

        # Otherwise, compute the std over all pairs of images
        for i in range(num_items - 1):
            for j in xrange(i + 1, num_items):
                num_pairs += 1
                std_sum += np.square(self.as_np(self.distance(vars[i], vars[j])) - expectation)

        return np.sqrt(std_sum / num_pairs)
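
A standalone sketch of the pattern above, assuming image batches of shape (B, C, H, W): a 2-way chunk on dim=2 splits each image into top and bottom halves, whose pairwise distance feeds the expectation and std estimates. The distance function here is a hypothetical L1 stand-in for the project's own.

import torch

def distance(a, b):  # hypothetical stand-in for self.distance
    return (a - b).abs().mean()

images = torch.rand(8, 3, 64, 64)            # (B, C, H, W)
top, bottom = torch.chunk(images, 2, dim=2)  # each (8, 3, 32, 64)
d = distance(top, bottom)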
Project: DistanceGAN    Author: sagiebenaim
def get_expectation(self, num_items, vars):

        num_pairs = 0
        distance_sum = 0.0

        # If using self-distance, compute the expectation over the top and bottom halves
        if self.use_self_distance:
            for i in range(num_items):
                # Split each image into top and bottom halves
                var_half_1, var_half_2 = torch.chunk(vars[i], 2, dim=2)
                distance_sum += self.as_np(self.distance(var_half_1, var_half_2))
            return distance_sum / num_items

        # Otherwise, compute the expectation over all pairs of images
        for i in range(num_items - 1):
            for j in xrange(i + 1, num_items):
                num_pairs += 1
                distance_sum += self.as_np(self.distance(vars[i], vars[j]))

        return distance_sum / num_pairs
Project: pytorch    Author: tylergenter
def chunk(self, n_chunks, dim=0):
        """Splits this tensor into a tuple of tensors.

        See :func:`torch.chunk`.
        """
        return torch.chunk(self, n_chunks, dim)
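
The tensor method is just a forwarding wrapper, so the method and free-function spellings are interchangeable; a quick check:

import torch

x = torch.rand(6, 10)
for a, b in zip(x.chunk(3, dim=0), torch.chunk(x, 3, dim=0)):
    assert torch.equal(a, b)  # same 2-row views either way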
Project: DistanceGAN    Author: sagiebenaim
def get_self_distances(self):

        A_half_1, A_half_2 = torch.chunk(self.real_A, 2, dim=2)
        B_half_1, B_half_2 = torch.chunk(self.real_B, 2, dim=2)
        AB_half_1, AB_half_2 = torch.chunk(self.fake_B, 2, dim=2)
        BA_half_1, BA_half_2 = torch.chunk(self.fake_A, 2, dim=2)

        l_distance_A, l_distance_B = \
            self.get_individual_distance_loss(A_half_1, A_half_2,
                                              AB_half_1, AB_half_2,
                                              B_half_1, B_half_2,
                                              BA_half_1, BA_half_2)

        return l_distance_A, l_distance_B
Project: DistanceGAN    Author: sagiebenaim
def get_self_distances(self, A, AB, A_to_AB=True):

        A_half_1, A_half_2 = torch.chunk(A, 2, dim=2)
        AB_half_1, AB_half_2 = torch.chunk(AB, 2, dim=2)

        l_distance_A = \
            self.get_individual_distance_loss(A_half_1, A_half_2,
                                              AB_half_1, AB_half_2, A_to_AB)

        return l_distance_A
Project: allennlp    Author: allenai
def forward(self,  # pylint: disable=arguments-differ
                inputs: torch.Tensor) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
        """
        Parameters
        ----------
        inputs: ``torch.autograd.Variable``
            Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.

        Returns
        -------
        Dict with keys:

        ``'activations'``: ``List[torch.autograd.Variable]``
            A list of activations at each layer of the network, each of shape
            ``(batch_size, timesteps + 2, embedding_dim)``
        ``'mask'``:  ``torch.autograd.Variable``
            Shape ``(batch_size, timesteps + 2)`` long tensor with sequence mask.

        Note that the output tensors all include additional special begin and end of sequence
        markers.
        """
        token_embedding = self._token_embedder(inputs)
        type_representation = token_embedding['token_embedding']
        mask = token_embedding['mask']
        lstm_outputs = self._elmo_lstm(type_representation, mask)

        # Prepare the output.  The first layer is duplicated.
        output_tensors = [
                torch.cat([type_representation, type_representation], dim=-1)
        ]
        for layer_activations in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0):
            output_tensors.append(layer_activations.squeeze(0))

        return {
                'activations': output_tensors,
                'mask': mask,
        }
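
The loop above is a common idiom for converting a stacked tensor of shape (num_layers, ...) into a Python list of per-layer tensors: chunk with as many chunks as there are layers, then squeeze away the leading singleton dimension. A standalone sketch with illustrative shapes (current PyTorch can also do this in one call with torch.unbind):

import torch

lstm_outputs = torch.rand(2, 4, 7, 16)  # (num_layers, batch, timesteps + 2, dim)
layers = [t.squeeze(0)
          for t in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0)]
# len(layers) == 2, each of shape (4, 7, 16)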
Project: FastNeuralStyle    Author: bengxy
def X2img(X, image_name, mod='rgb'):
    if mod == 'bgr':
        (b, g, r) = torch.chunk(X, 3)
        X = torch.cat((r, g, b))
    img = X.clone().cpu().clamp(0, 255).numpy()
    img = img.transpose(1, 2, 0).astype('uint8')
    img = Image.fromarray(img)
    img.save(image_name)

Project: FastNeuralStyle    Author: bengxy
def excg_rgb_bgr(batch):
    batch=batch.transpose(0,1)
    (r,g,b) = torch.chunk(batch, 3)
    batch = torch.cat((b,g,r))
    batch = batch.transpose(0,1)
    return batch
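
Several of the style-transfer snippets in this listing use the same chunk-and-cat idiom to swap RGB and BGR channel order. Transposing a (B, C, H, W) batch first moves channels to dim 0, so the default 3-way chunk yields one tensor per channel; a sketch:

import torch

batch = torch.rand(4, 3, 32, 32)  # (B, C, H, W)
batch = batch.transpose(0, 1)     # (C, B, H, W): channels lead
r, g, b = torch.chunk(batch, 3)   # one (1, B, H, W) chunk per channel
batch = torch.cat((b, g, r))      # reassemble in BGR order
batch = batch.transpose(0, 1)     # back to (B, C, H, W)

Plain indexing, batch[:, [2, 1, 0]], achieves the same reordering in one step.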

Project: pytorch-tutorial    Author: yunjey
def forward(self, x):
        h = self.encoder(x)
        mu, log_var = torch.chunk(h, 2, dim=1)  # mean and log variance.
        z = self.reparametrize(mu, log_var)
        out = self.decoder(z)
        return out, mu, log_var
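
In the VAE above, the encoder emits mean and log-variance as one tensor, and a 2-way chunk on dim=1 separates the two heads before the reparameterization trick. A minimal sketch of those two steps, with a random tensor standing in for the encoder output:

import torch

h = torch.randn(16, 40)                  # hypothetical encoder output
mu, log_var = torch.chunk(h, 2, dim=1)   # two (16, 20) heads
eps = torch.randn_like(mu)
z = mu + eps * torch.exp(0.5 * log_var)  # reparameterization trick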
Project: fast-neural-style    Author: abhiskk
def tensor_save_bgrimage(tensor, filename, cuda=False):
    (b, g, r) = torch.chunk(tensor, 3)
    tensor = torch.cat((r, g, b))
    tensor_save_rgbimage(tensor, filename, cuda)
Project: fast-neural-style    Author: abhiskk
def preprocess_batch(batch):
    batch = batch.transpose(0, 1)
    (r, g, b) = torch.chunk(batch, 3)
    batch = torch.cat((b, g, r))
    batch = batch.transpose(0, 1)
    return batch
Project: pytorch-fast-neural-style    Author: vishal1796
def batch_rgb_to_bgr(batch):
    batch = batch.transpose(0, 1)
    (r, g, b) = torch.chunk(batch, 3)
    batch = torch.cat((b, g, r))
    batch = batch.transpose(0, 1)
    return batch


Project: Deep-learning-with-cats    Author: AlexiaJM
def RGB_to_BGR(batch):
    batch = batch.transpose(0, 1)
    (r, g, b) = torch.chunk(batch, 3)
    batch = torch.cat((b, g, r))
    batch = batch.transpose(0, 1)
    return batch

Project: pytorch-dynamic-batching    Author: mrdrozdov-github
def forward(self, x, lengths):
        batch_size = len(x)
        lengths = [len(s) for s in x]

        outputs = [Variable(torch.zeros(1, self.model_dim).float(), volatile=not self.training)
                   for _ in range(batch_size)]

        for t in range(max(lengths)):
            batch = []
            h = []
            idx = []
            for i, (s, l) in enumerate(zip(x, lengths)):
                if l >= max(lengths) - t:
                    batch.append(s.pop())
                    h.append(outputs[i])
                    idx.append(i)

            batch = np.concatenate(np.array(batch).reshape(-1, 1), 0)
            emb = Variable(torch.from_numpy(self.initial_embeddings.take(batch, 0)), volatile=not self.training)
            h = torch.cat(h, 0)
            h_next = self.rnn(emb, h)
            h_next = torch.chunk(h_next, len(idx))

            for i, o in zip(idx, h_next):
                outputs[i] = o

        outputs = torch.cat(outputs, 0)
        h = F.relu(self.l0(F.dropout(outputs, 0.5, self.training)))
        h = F.relu(self.l1(F.dropout(h, 0.5, self.training)))
        y = F.log_softmax(h, dim=1)  # explicit dim avoids the deprecation warning
        return y
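
The torch.chunk(h_next, len(idx)) call above splits the RNN output back into one (1, hidden) row per active example so that each row can be written to its own slot in outputs. A reduced sketch of just that scatter step (names are illustrative):

import torch

h_next = torch.rand(3, 8)   # updated states for the 3 active examples
idx = [0, 2, 5]             # their positions in the full batch of 6
outputs = [torch.zeros(1, 8) for _ in range(6)]
for i, o in zip(idx, torch.chunk(h_next, len(idx))):
    outputs[i] = o          # each chunk keeps shape (1, 8)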
Project: pytorch    Author: pytorch
def chunk(self, n_chunks, dim=0):
        r"""Splits this tensor into a certain number of tensor chunks.

        See :func:`torch.chunk`.
        """
        return torch.chunk(self, n_chunks, dim)
Project: repeval_rivercorners    Author: jabalazs
def forward(self, lstm_out, lengths):
        """

        Args:
            lstm_out: A Variable containing a 3D tensor of dimension
                (seq_len, batch_size, hidden_x_dirs)
            lengths: A Variable containing 1D LongTensor of dimension
                (batch_size)

        Return:
            A Variable containing a 2D tensor of the same type as lstm_out of
            dim (batch_size, hidden_x_dirs) corresponding to the concatenated
            last hidden states of the forward and backward parts of the input.
        """

        seq_len = lstm_out.size(0)
        batch_size = lstm_out.size(1)
        hidden_x_dirs = lstm_out.size(2)
        single_dir_hidden = hidden_x_dirs // 2  # integer division: per-direction hidden size

        lengths_fw = lengths
        lengths_bw = seq_len - lengths_fw

        rep_lengths_fw = lengths_fw.view(1, batch_size, 1)
        rep_lengths_fw = rep_lengths_fw.repeat(1, 1, single_dir_hidden)

        rep_lengths_bw = lengths_bw.view(1, batch_size, 1)
        rep_lengths_bw = rep_lengths_bw.repeat(1, 1, single_dir_hidden)

        # we want 2 chunks in the last dimension
        out_fw, out_bw = torch.chunk(lstm_out, 2, 2)

        h_t_fw = torch.gather(out_fw, 0, rep_lengths_fw-1)
        h_t_bw = torch.gather(out_bw, 0, rep_lengths_bw)

        # -> (batch_size, hidden_x_dirs)
        last_hidden_out = torch.cat([h_t_fw, h_t_bw], 2).squeeze()
        return last_hidden_out
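
A standalone check of the forward/backward split above: a bidirectional RNN's output stacks both directions in its last dimension, so a 2-way chunk on dim 2 recovers them.

import torch

lstm_out = torch.rand(10, 4, 2 * 50)          # (seq_len, batch, hidden_x_dirs)
out_fw, out_bw = torch.chunk(lstm_out, 2, 2)  # each (10, 4, 50)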
Project: DisentangleVAE    Author: Jueast
def decode(self, z):
        zcode = list(torch.chunk(z, self.code_dims[0], dim=1))[::-1]
        h = self.act(self.fc1(zcode[0]))
        for z, fc in zip(zcode[1:], self.decode_layers):
            h = fc(h, z)
        return self.fc2(h)
Project: DisentangleVAE    Author: Jueast
def decode(self, z):
        zcode = list(torch.chunk(z, self.code_dims[0], dim=1))[::-1]
        h = self.act(self.fc1(zcode[0]))
        for z, fc in zip(zcode[1:], self.decoder_list):
            h = fc(h, z)
        return self.fc2(h)
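
Both decode variants slice the latent vector into self.code_dims[0] equal-width codes and feed them to successive decoder layers in reverse order. A sketch of just the slicing, assuming 4 codes of width 8:

import torch

z = torch.rand(16, 32)                        # (batch, 4 * 8)
zcode = list(torch.chunk(z, 4, dim=1))[::-1]  # four (16, 8) codes, reversed
# zcode[0] seeds the first layer; the rest are injected one per layer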
Project: pytorch-tools    Author: nearai
def apply(self, nn, nodes):
        """Apply current fold to given neural module."""
        values = {}
        for step in sorted(self.steps.keys()):
            values[step] = {}
            for op in self.steps[step]:
                func = getattr(nn, op)
                try:
                    batched_args = self._batch_args(
                        zip(*self.steps[step][op]), values)
                except Exception:
                    print("Error while executing node %s[%d] with args: %s" % (
                        op, step, self.steps[step][op]))
                    raise
                if batched_args:
                    arg_size = batched_args[0].size()[0]
                else:
                    arg_size = 1
                res = func(*batched_args)
                if isinstance(res, (tuple, list)):
                    values[step][op] = []
                    for x in res:
                        values[step][op].append(torch.chunk(x, arg_size))
                else:
                    values[step][op] = torch.chunk(res, arg_size)
        try:
            return self._batch_args(nodes, values)
        except Exception:
            print("Retrieving %s" % nodes)
            for lst in nodes:
                if isinstance(lst[0], Fold.Node):
                    print(', '.join([str(x.get(values).size()) for x in lst]))
            raise
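
The chunk calls at the end of apply undo the batching that makes Fold fast: one batched forward computes results for arg_size queued nodes, and chunking along dim 0 hands each node back its own (1, ...) slice. A reduced sketch with a plain linear layer standing in for a fold op:

import torch
import torch.nn as nn

op = nn.Linear(4, 4)
batched_args = torch.rand(5, 4)  # 5 queued calls merged into one batch
res = op(batched_args)           # a single forward pass serves all 5
per_node = torch.chunk(res, 5)   # one (1, 4) result per queued call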
Project: multiNLI_encoder    Author: easonnie
def text_conv1d(inputs, l1, conv_filter: nn.Linear, k_size, dropout=None, list_in=False,
                gate_way=True):
    """
    :param inputs: [T * B * D] 
    :param l1:  [B]
    :param conv_filter:  [k * D_in, D_out * 2]
    :param k_size:  
    :param dropout: 
    :param padding: 
    :param list_in: 
    :return: 
    """
    k = k_size
    batch_size = l1.size(0)
    d_in = inputs.size(2) if not list_in else inputs[0].size(1)
    unit_d = conv_filter.out_features // 2
    pad_n = (k - 1) // 2

    zeros_padding = Variable(inputs[0].data.new(pad_n, d_in).zero_())

    batch_list = []
    input_list = []
    for b_i in range(batch_size):
        masked_in = inputs[:l1[b_i], b_i, :] if not list_in else inputs[b_i]
        if gate_way:
            input_list.append(masked_in)

        b_inputs = torch.cat([zeros_padding, masked_in, zeros_padding], dim=0)
        for i in range(l1[b_i]):
            # print(b_inputs[i:i+k])
            batch_list.append(b_inputs[i:i+k].view(k * d_in))

    batch_in = torch.stack(batch_list, dim=0)
    a, b = torch.chunk(conv_filter(batch_in), 2, 1)
    out = a * F.sigmoid(b)

    out_list = []
    start = 0
    for b_i in range(batch_size):
        if gate_way:
            out_list.append(torch.cat((input_list[b_i], out[start:start + l1[b_i]]), dim=1))
        else:
            out_list.append(out[start:start + l1[b_i]])

        start = start + l1[b_i]

    # max_out_list = []
    # for b_i in range(batch_size):
    #     max_out, _ = torch.max(out_list[b_i], dim=0)
    #     max_out_list.append(max_out)
    # max_out = torch.cat(max_out_list, 0)
    #
    # print(out_list)

    return out_list
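
The a * F.sigmoid(b) step above is a gated linear unit: conv_filter produces twice the target width, a 2-way chunk splits it into values and gates, and the sigmoid-gated product halves it again. A standalone sketch (current PyTorch spells the activation torch.sigmoid and offers torch.nn.functional.glu for the fused pattern):

import torch
import torch.nn as nn

conv_filter = nn.Linear(3 * 300, 2 * 150)  # [k * D_in] -> [D_out * 2]
batch_in = torch.rand(32, 3 * 300)
a, b = torch.chunk(conv_filter(batch_in), 2, 1)
out = a * torch.sigmoid(b)                 # (32, 150) gated linear unit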
Project: pytorch-dynamic-batching    Author: mrdrozdov-github
def forward(self, x, lengths):
        batch_size = x.size(0)
        max_len = max(lengths)

        emb = Variable(torch.from_numpy(
            self.initial_embeddings.take(x.numpy(), 0)),
            volatile=not self.training)

        outputs = [Variable(torch.zeros(batch_size, self.model_dim).float(), volatile=not self.training)]

        for t in range(max_len):
            choose = torch.ByteTensor(batch_size)
            indices = []
            not_indices = []
            for i, l in enumerate(lengths):
                if l >= max(lengths) - t:
                    indices.append(i)
                    choose[i] = 1
                else:
                    not_indices.append(i)
                    choose[i] = 0

            # Build batch.
            batch = torch.index_select(emb[:,t,:], 0, Variable(torch.LongTensor(indices), volatile=not self.training))
            h_prev = torch.index_select(outputs[-1], 0, Variable(torch.LongTensor(indices), volatile=not self.training))
            h_next = self.rnn(batch, h_prev)

            # Some preparation for output for next step.
            if len(not_indices) > 0:
                not_h_prev = torch.index_select(outputs[-1], 0, Variable(torch.LongTensor(not_indices), volatile=not self.training))
                _not_h_prev = torch.chunk(not_h_prev, len(not_indices))
            _h_next = torch.chunk(h_next, len(indices))

            # Make variable for next step.
            _h = []
            _h_next_idx = 0
            _not_h_prev_idx = 0
            for c in choose:
                if c == 1:
                    _h.append(_h_next[_h_next_idx])
                    _h_next_idx += 1
                else:
                    _h.append(_not_h_prev[_not_h_prev_idx])
                    _not_h_prev_idx += 1
            h = torch.cat(_h, 0)

            outputs.append(h)

        hn = outputs[-1]
        h = F.relu(self.l0(F.dropout(hn, 0.5, self.training)))
        h = F.relu(self.l1(F.dropout(h, 0.5, self.training)))
        y = F.log_softmax(h, dim=1)  # explicit dim avoids the deprecation warning
        return y