Python torch module, from_numpy() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use torch.from_numpy().
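
As a quick orientation before the project excerpts, below is a minimal, self-contained sketch (not taken from any of the projects) of what torch.from_numpy() does: it wraps a NumPy array as a tensor that shares the array's memory and inherits its dtype, so an explicit cast such as .float() is usually needed before feeding float64 data to a model. The array and variable names are illustrative.

import numpy as np
import torch

# torch.from_numpy() does not copy: the returned tensor shares memory with
# the array and keeps its dtype (float32 here).
arr = np.arange(6, dtype=np.float32).reshape(2, 3)
t = torch.from_numpy(arr)

arr[0, 0] = 42.0    # the in-place change to the array is visible through the tensor
print(t[0, 0])      # shows the updated value

# np.random.rand() returns float64, so cast before using it as a model input;
# unsqueeze(0) adds a batch dimension.
x = torch.from_numpy(np.random.rand(3, 32, 32)).float().unsqueeze(0)
print(x.size())     # torch.Size([1, 3, 32, 32])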

Project: YellowFin_Pytorch    Author: JianGoForIt    | project source | file source
def pad_batch(mini_batch):
    mini_batch_size = len(mini_batch)
#     print mini_batch.shape
#     print mini_batch
    max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
    max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
#     print max_sent_len1, max_sent_len2
#     max_token_len = int(np.mean([len(val) for sublist in mini_batch for val in sublist]))
    main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=np.int)
    main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=np.int)
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[0]):
            try:
                main_matrix1[idx1, idx2] = j
            except IndexError:
                pass
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[1]):
            try:
                main_matrix2[idx1, idx2] = j
            except IndexError:
                pass
    main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
    main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
#     print main_matrix1_t.size()
#     print main_matrix2_t.size()
    return [main_matrix1_t, main_matrix2_t]
#     return [Variable(torch.cat((main_matrix1_t, main_matrix2_t), 0))

# def pad_batch(mini_batch):
# #     print mini_batch
# #     print type(mini_batch)
# #     print mini_batch.shape
# #     for i, _ in enumerate(mini_batch):
# #         print i, _
#     return [Variable(torch.from_numpy(np.asarray(_))) for _ in mini_batch[0]]
Project: ssd.pytorch    Author: amdegroot    | project source | file source
def pull_item(self, index):
        img_id = self.ids[index]

        target = ET.parse(self._annopath % img_id).getroot()
        img = cv2.imread(self._imgpath % img_id)
        height, width, channels = img.shape

        if self.target_transform is not None:
            target = self.target_transform(target, width, height)

        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
            # to rgb
            img = img[:, :, (2, 1, 0)]
            # img = img.transpose(2, 0, 1)
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        return torch.from_numpy(img).permute(2, 0, 1), target, height, width
        # return torch.from_numpy(img), target, height, width
Project: pytorch-semseg    Author: meetshah1995    | project source | file source
def transform(self, img, lbl):
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))
        # Resize scales images from 0 to 255, thus we need
        # to divide by 255.0
        img = img.astype(float) / 255.0
        # HWC -> CHW
        img = img.transpose(2, 0, 1)

        lbl[lbl==255] = 0
        lbl = lbl.astype(float)
        lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
        lbl = lbl.astype(int)

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
Project: pytorch-semseg    Author: meetshah1995    | project source | file source
def transform(self, img, lbl):
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))
        # Resize scales images from 0 to 255, thus we need
        # to divide by 255.0
        img = img.astype(float) / 255.0
        # HWC -> CHW
        img = img.transpose(2, 0, 1)

        lbl = self.encode_segmap(lbl)
        classes = np.unique(lbl)
        lbl = lbl.astype(float)
        lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
        lbl = lbl.astype(int)
        assert(np.all(classes == np.unique(lbl)))

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
Project: DeepLearning_PlantDiseases    Author: MarkoArsenovic    | project source | file source
def Occlusion_exp(image,occluding_size,occluding_stride,model,preprocess,classes,groundTruth):    
    img = np.copy(image)
    height, width,_= img.shape
    output_height = int(math.ceil((height-occluding_size)/occluding_stride+1))
    output_width = int(math.ceil((width-occluding_size)/occluding_stride+1))
    ocludedImages=[]
    for h in range(output_height):
        for w in range(output_width):
            #occluder region
            h_start = h*occluding_stride
            w_start = w*occluding_stride
            h_end = min(height, h_start + occluding_size)
            w_end = min(width, w_start + occluding_size)

            input_image = copy.copy(img)
            input_image[h_start:h_end,w_start:w_end,:] =  0
            ocludedImages.append(preprocess(Image.fromarray(input_image)))

    L = np.empty(output_height*output_width)
    L.fill(groundTruth)
    L = torch.from_numpy(L)
    tensor_images = torch.stack([img for img in ocludedImages])
    dataset = torch.utils.data.TensorDataset(tensor_images,L) 
    dataloader = torch.utils.data.DataLoader(dataset,batch_size=5,shuffle=False, num_workers=8) 

    heatmap=np.empty(0)
    model.eval()
    for data in dataloader:
        images, labels = data

        if use_gpu:
            images, labels = (images.cuda()), (labels.cuda(async=True))

        outputs = model(Variable(images))
        m = nn.Softmax()
        outputs=m(outputs)
        if use_gpu:
            outs = outputs.cpu()
        else:
            outs = outputs
        heatmap = np.concatenate((heatmap, outs[0:outs.size()[0], groundTruth].data.numpy()))

    return heatmap.reshape((output_height, output_width))
Project: pytorch-nec    Author: mjacar    | project source | file source
def get_screen(self):
    screen = self.env.render(mode='rgb_array').transpose(
        (2, 0, 1))  # transpose into torch order (CHW)
    # Strip off the top and bottom of the screen
    screen = screen[:, 160:320]
    view_width = 320
    cart_location = self.get_cart_location()
    if cart_location < view_width // 2:
        slice_range = slice(view_width)
    elif cart_location > (self.screen_width - view_width // 2):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - view_width // 2,
                            cart_location + view_width // 2)
    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]
    # Convert to float, rescale, convert to torch tensor
    # (this doesn't require a copy)
    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)
    # Resize, and add a batch dimension (BCHW)
    return self.resize(screen).numpy()
Project: python-utils    Author: zhijian-liu    | project source | file source
def add(self, outputs, targets):
        outputs = to_numpy(outputs)
        targets = to_numpy(targets)

        if np.ndim(targets) == 2:
            targets = np.argmax(targets, 1)

        assert np.ndim(outputs) == 2, 'wrong output size (2D expected)'
        assert np.ndim(targets) == 1, 'wrong target size (1D or 2D expected)'
        assert targets.shape[0] == outputs.shape[0], 'number of outputs and targets do not match'

        top_k = self.top_k
        max_k = int(top_k[-1])

        predict = torch.from_numpy(outputs).topk(max_k, 1, True, True)[1].numpy()
        correct = (predict == targets[:, np.newaxis].repeat(predict.shape[1], 1))

        self.size += targets.shape[0]
        for k in top_k:
            self.corrects[k] += correct[:, :k].sum()
Project: speed    Author: keon    | project source | file source
def train(e, model, opt, dataset, arg, cuda=False):
    model.train()
    criterion = nn.MSELoss()
    losses = []

    batcher = dataset.get_batcher(shuffle=True, augment=True)
    for b, (x, y) in enumerate(batcher, 1):
        x = V(th.from_numpy(x).float()).cuda()
        y = V(th.from_numpy(y).float()).cuda()
        opt.zero_grad()
        logit = model(x)
        loss = criterion(logit, y)
        loss.backward()
        opt.step()

        losses.append(loss.data[0])
        if arg.verbose and b % 50 == 0:
            loss_t = np.mean(losses[-50:])
            print('[train] [e]:%s [b]:%s - [loss]:%s' % (e, b, loss_t))
    return losses
Project: speed    Author: keon    | project source | file source
def validate(models, dataset, arg, cuda=False):
    criterion = nn.MSELoss()
    losses = []
    batcher = dataset.get_batcher(shuffle=True, augment=False)
    for b, (x, y) in enumerate(batcher, 1):
        x = V(th.from_numpy(x).float()).cuda()
        y = V(th.from_numpy(y).float()).cuda()
        # Ensemble average
        logit = None
        for model, _ in models:
            model.eval()
            logit = model(x) if logit is None else logit + model(x)
        logit = th.div(logit, len(models))
        loss = criterion(logit, y)
        losses.append(loss.data[0])
    return np.mean(losses)
Project: speed    Author: keon    | project source | file source
def predict(models, dataset, arg, cuda=False):
    prediction_file = open('save/predictions.txt', 'w')
    batcher = dataset.get_batcher(shuffle=False, augment=False)
    for b, (x, _) in enumerate(batcher, 1):
        x = V(th.from_numpy(x).float()).cuda()
        # Ensemble average
        logit = None
        for model, _ in models:
            model.eval()
            logit = model(x) if logit is None else logit + model(x)
        logit = th.div(logit, len(models))
        prediction = logit.cpu().data[0][0]
        prediction_file.write('%s\n' % prediction)
        if arg.verbose and b % 100 == 0:
            print('[predict] [b]:%s - prediction: %s' % (b, prediction))
    # prediction_file.close()
Project: pytorch.rl.learning    Author: moskomule    | project source | file source
def _loop(self):
        done = False
        total_reward, reward, iter = 0, 0, 0
        self.state = self.env.reset()
        while not done:
            action = self.policy()
            _state, reward, done, _ = self.env.step(action)
            # if _state is terminal, state value is 0
            v = 0 if done else self.state_value(_state)
            delta = reward + self.gamma * v - self.state_value(self.state)
            # \nabla_w v = s, since v = s^{\top} w
            self.state_value_weight += self.beta * delta * to_tensor(self.state).float()
            # \pi(a) = x^{\top}(a)w, where x is feature and w is weight
            # \nabla\ln\pi(a) = x(a) - \sum_b \pi(b)x(b)
            direction = self.feature(_state, action) - sum(
                    [self.softmax @ torch.cat([self.feature(_state, a).unsqueeze(0) for a in self.actions])])

            self.weight += self.alpha * pow(self.gamma, iter) * delta * direction
            total_reward += reward
            self.state = _state
            iter += 1
        return total_reward
Project: pytorch.rl.learning    Author: moskomule    | project source | file source
def _loop(self):
        done = False
        total_reward, reward, iter = 0, 0, 0
        self.state = self.env.reset()
        weight = self.weight
        while not done:
            action = self.policy()
            _state, reward, done, _ = self.env.step(action)
            # use current weight to generate an episode
            # \pi(a) = x^{\top}(a)w, where x is feature and w is weight
            # \nabla\ln\pi(a) = x(a) - \sum_b \pi(b)x(b)
            delta = reward - self.state_value(_state)
            self.state_value_weight += self.beta * delta * to_tensor(_state).float()
            direction = self.feature(_state, action) - sum(
                [self.softmax @ torch.cat([self.feature(_state, a).unsqueeze(0) for a in self.actions])])
            weight += self.alpha * pow(self.gamma, iter) * delta * direction
            total_reward += reward
            iter += 1
        # update weight
        self.weight = weight
        return total_reward
Project: colorNet-pytorch    Author: shufanwu    | project source | file source
def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img_original = self.transform(img)
            img_original = np.asarray(img_original)

            img_lab = rgb2lab(img_original)
            img_lab = (img_lab + 128) / 255
            img_ab = img_lab[:, :, 1:3]
            img_ab = torch.from_numpy(img_ab.transpose((2, 0, 1)))
            img_original = rgb2gray(img_original)
            img_original = torch.from_numpy(img_original)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (img_original, img_ab), target
Project: colorNet-pytorch    Author: shufanwu    | project source | file source
def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)

        img_scale = img.copy()
        img_original = img
        img_scale = scale_transform(img_scale)

        img_scale = np.asarray(img_scale)
        img_original = np.asarray(img_original)

        img_scale = rgb2gray(img_scale)
        img_scale = torch.from_numpy(img_scale)
        img_original = rgb2gray(img_original)
        img_original = torch.from_numpy(img_original)
        return (img_original, img_scale), target
Project: emu    Author: mlosch    | project source | file source
def forward(self, input):
        input_torch = torch.from_numpy(input)
        if self.use_gpu:
            input_torch = input_torch.cuda()
        else:
            input_torch = input_torch.float()

        input_var = Variable(input_torch)

        # forward
        out = self.model.forward(input_var)

        if type(out) is list:
            clean_out = []
            for v in out:
                clean_out.append(v.data.cpu().numpy())
            out = clean_out
        else:
            out = out.data.cpu().numpy()
        self.ready = True

        return out
Project: sef    Author: passalis    | project source | file source
def fast_heat_similarity_matrix(X, sigma):
    """
    PyTorch based similarity calculation
    :param X: the matrix with the data
    :param sigma: scaling factor
    :return: the similarity matrix
    """
    use_gpu = False
    # Use GPU if available
    if torch.cuda.device_count() > 0:
        use_gpu = True

    X = Variable(torch.from_numpy(np.float32(X)))
    sigma = Variable(torch.from_numpy(np.float32([sigma])))
    if use_gpu:
        X, sigma = X.cuda(), sigma.cuda()

    D = sym_heat_similarity_matrix(X, sigma)

    if use_gpu:
        D = D.cpu()

    return D.data.numpy()
Project: sef    Author: passalis    | project source | file source
def __init__(self, input_dimensionality, output_dimensionality, scaler='default'):
        """
        Creates a Linear SEF object
        :param input_dimensionality: dimensionality of the input space
        :param output_dimensionality: dimensionality of the target space
        :param learning_rate: learning rate to be used for the optimization
        :param regularizer_weight: the weight of the regularizer
        :param scaler:
        """

        # Call base constructor
        SEF_Base.__init__(self, input_dimensionality, output_dimensionality, scaler)

        # Projection weights variables
        W = np.float32(0.1 * np.random.randn(self.input_dimensionality, output_dimensionality))
        self.W = Variable(torch.from_numpy(W), requires_grad=True)
        self.trainable_params = [self.W]
Project: SentEval    Author: facebookresearch    | project source | file source
def trainepoch(self, X, y, nepoches=1):
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + nepoches):
            permutation = np.random.permutation(len(X))
            all_costs = []
            for i in range(0, len(X), self.batch_size):
                # forward
                idx = torch.from_numpy(permutation[i:i + self.batch_size]).long().cuda()
                Xbatch = Variable(X.index_select(0, idx))
                ybatch = Variable(y.index_select(0, idx))
                output = self.model(Xbatch)
                # loss
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.data[0])
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += nepoches
Project: allennlp    Author: allenai    | project source | file source
def test_last_dim_softmax_does_softmax_on_last_dim(self):
        batch_size = 1
        length_1 = 5
        length_2 = 3
        num_options = 4
        options_array = numpy.zeros((batch_size, length_1, length_2, num_options))
        for i in range(length_1):
            for j in range(length_2):
                options_array[0, i, j] = [2, 4, 0, 1]
        options_tensor = Variable(torch.from_numpy(options_array))
        softmax_tensor = util.last_dim_softmax(options_tensor).data.numpy()
        assert softmax_tensor.shape == (batch_size, length_1, length_2, num_options)
        for i in range(length_1):
            for j in range(length_2):
                assert_almost_equal(softmax_tensor[0, i, j],
                                    [0.112457, 0.830953, 0.015219, 0.041371],
                                    decimal=5)
Project: allennlp    Author: allenai    | project source | file source
def test_last_dim_softmax_handles_mask_correctly(self):
        batch_size = 1
        length_1 = 4
        length_2 = 3
        num_options = 5
        options_array = numpy.zeros((batch_size, length_1, length_2, num_options))
        for i in range(length_1):
            for j in range(length_2):
                options_array[0, i, j] = [2, 4, 0, 1, 6]
        mask = Variable(torch.IntTensor([[1, 1, 1, 1, 0]]))
        options_tensor = Variable(torch.from_numpy(options_array).float())
        softmax_tensor = util.last_dim_softmax(options_tensor, mask).data.numpy()
        assert softmax_tensor.shape == (batch_size, length_1, length_2, num_options)
        for i in range(length_1):
            for j in range(length_2):
                assert_almost_equal(softmax_tensor[0, i, j],
                                    [0.112457, 0.830953, 0.015219, 0.041371, 0.0],
                                    decimal=5)
Project: allennlp    Author: allenai    | project source | file source
def test_weighted_sum_handles_uneven_higher_order_input(self):
        batch_size = 1
        length_1 = 5
        length_2 = 6
        length_3 = 2
        embedding_dim = 4
        sentence_array = numpy.random.rand(batch_size, length_3, embedding_dim)
        attention_array = numpy.random.rand(batch_size, length_1, length_2, length_3)
        sentence_tensor = Variable(torch.from_numpy(sentence_array).float())
        attention_tensor = Variable(torch.from_numpy(attention_array).float())
        aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
        assert aggregated_array.shape == (batch_size, length_1, length_2, embedding_dim)
        for i in range(length_1):
            for j in range(length_2):
                expected_array = (attention_array[0, i, j, 0] * sentence_array[0, 0] +
                                  attention_array[0, i, j, 1] * sentence_array[0, 1])
                numpy.testing.assert_almost_equal(aggregated_array[0, i, j], expected_array,
                                                  decimal=5)
Project: allennlp    Author: allenai    | project source | file source
def test_add_sentence_boundary_token_ids_handles_3D_input(self):
        tensor = Variable(torch.from_numpy(
                numpy.array([[[1, 2, 3, 4],
                              [5, 5, 5, 5],
                              [6, 8, 1, 2]],
                             [[4, 3, 2, 1],
                              [8, 7, 6, 5],
                              [0, 0, 0, 0]]])))
        mask = ((tensor > 0).sum(dim=-1) > 0).type(torch.LongTensor)
        bos = Variable(torch.from_numpy(numpy.array([9, 9, 9, 9])))
        eos = Variable(torch.from_numpy(numpy.array([10, 10, 10, 10])))
        new_tensor, new_mask = util.add_sentence_boundary_token_ids(tensor, mask, bos, eos)
        expected_new_tensor = numpy.array([[[9, 9, 9, 9],
                                            [1, 2, 3, 4],
                                            [5, 5, 5, 5],
                                            [6, 8, 1, 2],
                                            [10, 10, 10, 10]],
                                           [[9, 9, 9, 9],
                                            [4, 3, 2, 1],
                                            [8, 7, 6, 5],
                                            [10, 10, 10, 10],
                                            [0, 0, 0, 0]]])
        assert (new_tensor.data.numpy() == expected_new_tensor).all()
        assert (new_mask.data.numpy() == ((expected_new_tensor > 0).sum(axis=-1) > 0)).all()
Project: allennlp    Author: allenai    | project source | file source
def test_remove_sentence_boundaries(self):
        tensor = Variable(torch.from_numpy(numpy.random.rand(3, 5, 7)))
        mask = Variable(torch.from_numpy(
                # The mask with two elements is to test the corner case
                # of an empty sequence, so here we are removing boundaries
                # from  "<S> </S>"
                numpy.array([[1, 1, 0, 0, 0],
                             [1, 1, 1, 1, 1],
                             [1, 1, 1, 1, 0]]))).long()
        new_tensor, new_mask = util.remove_sentence_boundaries(tensor, mask)

        expected_new_tensor = Variable(torch.zeros(3, 3, 7))
        expected_new_tensor[1, 0:3, :] = tensor[1, 1:4, :]
        expected_new_tensor[2, 0:2, :] = tensor[2, 1:3, :]
        assert_array_almost_equal(new_tensor.data.numpy(), expected_new_tensor.data.numpy())

        expected_new_mask = Variable(torch.from_numpy(
                numpy.array([[0, 0, 0],
                             [1, 1, 1],
                             [1, 1, 0]]))).long()
        assert (new_mask.data.numpy() == expected_new_mask.data.numpy()).all()
Project: allennlp    Author: allenai    | project source | file source
def __init__(self,
                 options_file: str,
                 weight_file: str) -> None:
        super(_ElmoCharacterEncoder, self).__init__()

        with open(cached_path(options_file), 'r') as fin:
            self._options = json.load(fin)
        self._weight_file = weight_file

        self.output_dim = self._options['lstm']['projection_dim']

        self._load_weights()

        # Cache the arrays for use in forward -- +1 due to masking.
        self._beginning_of_sentence_characters = Variable(torch.from_numpy(
                numpy.array(ELMoCharacterMapper.beginning_of_sentence_characters) + 1
        ))
        self._end_of_sentence_characters = Variable(torch.from_numpy(
                numpy.array(ELMoCharacterMapper.end_of_sentence_characters) + 1
        ))
Project: allennlp    Author: allenai    | project source | file source
def as_tensor(self,
                  padding_lengths: Dict[str, int],
                  cuda_device: int = -1,
                  for_training: bool = True) -> torch.Tensor:
        max_shape = [padding_lengths["dimension_{}".format(i)]
                     for i in range(len(padding_lengths))]

        return_array = numpy.ones(max_shape, "float32") * self.padding_value

        # If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
        # form the right shaped list of slices for insertion into the final tensor.
        slicing_shape = list(self.array.shape)
        if len(self.array.shape) < len(max_shape):
            slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.shape))]
        slices = [slice(0, x) for x in slicing_shape]
        return_array[slices] = self.array
        tensor = Variable(torch.from_numpy(return_array), volatile=not for_training)
        return tensor if cuda_device == -1 else tensor.cuda(cuda_device)
Project: mss_pytorch    Author: Js-Mim    | project source | file source
def forward(self, H_j_dec, input_x):
        if torch.has_cudnn:
            # Input is of the shape : (B, T, N)
            input_x = Variable(torch.from_numpy(input_x[:, self._L:-self._L, :]).cuda(), requires_grad=True)

        else:
            # Input is of the shape : (B, T, N)
            # Cropping some unnecessary frequency sub-bands
            input_x = Variable(torch.from_numpy(input_x[:, self._L:-self._L, :]), requires_grad=True)

        # Decode/Sparsify mask
        mask_t1 = self.relu(self.ffDec(H_j_dec))
        # Apply skip-filtering connections
        Y_j = torch.mul(mask_t1, input_x)

        return Y_j, mask_t1
Project: torch_light    Author: ne7ermore    | project source | file source
def __next__(self):
        def to_longest(insts):
            inst_data_tensor = Variable(torch.from_numpy(insts))
            if self.cuda:
                inst_data_tensor = inst_data_tensor.cuda()
            return inst_data_tensor

        if self._step == self._stop_step:
            self._step = 0
            raise StopIteration()

        _start = self._step*self._batch_size
        _bsz = self._batch_size
        self._step += 1
        data = to_longest(self._src_sents[_start: _start+_bsz])
        label = to_longest(self._label[_start: _start+_bsz])
        return data, label.contiguous().view(-1)
Project: torch_light    Author: ne7ermore    | project source | file source
def __next__(self):
        def pad_to_longest(insts, max_len):
            inst_data = np.array([inst + [const.PAD] * (max_len - len(inst)) for inst in insts])

            inst_data_tensor = Variable(torch.from_numpy(inst_data), volatile=self.evaluation)
            if self.cuda:
                inst_data_tensor = inst_data_tensor.cuda()
            return inst_data_tensor

        if self._step == self._stop_step:
            self._step = 0
            raise StopIteration()

        _start = self._step*self._batch_size
        _bsz = self._batch_size
        self._step += 1
        data = pad_to_longest(self._src_sents[_start:_start+_bsz], self._max_len)
        label = Variable(torch.from_numpy(self._label[_start:_start+_bsz]),
                    volatile=self.evaluation)
        if self.cuda:
            label = label.cuda()

        return data, label
Project: torch_light    Author: ne7ermore    | project source | file source
def __next__(self):
        def pad_to_longest(insts, max_len):
            inst_data = np.array([inst + [const.PAD] * (max_len - len(inst)) for inst in insts])

            inst_data_tensor = Variable(torch.from_numpy(inst_data), volatile=self.evaluation)
            if self.cuda:
                inst_data_tensor = inst_data_tensor.cuda()
            return inst_data_tensor

        if self._step == self._stop_step:
            self._step = 0
            raise StopIteration()

        _start = self._step*self._batch_size
        _bsz = min(self._batch_size, self.sents_size-_start)
        self._step += 1
        data = pad_to_longest(self._src_sents[_start:_start+_bsz], self._max_len)
        label = Variable(torch.from_numpy(self._label[_start:_start+_bsz]),
                    volatile=self.evaluation)
        if self.cuda:
            label = label.cuda()

        return data, label
Project: torch_light    Author: ne7ermore    | project source | file source
def __next__(self):
        def to_longest(insts):
            inst_data_tensor = Variable(torch.from_numpy(insts))
            if self.cuda:
                inst_data_tensor = inst_data_tensor.cuda()
            return inst_data_tensor

        if self._step == self._stop_step:
            self._step = 0
            raise StopIteration()

        _start = self._step*self._batch_size
        _bsz = self._batch_size
        self._step += 1

        enc_input = to_longest(self._enc_sents[_start: _start+_bsz])
        dec_input = to_longest(self._dec_sents[_start: _start+_bsz])
        label = to_longest(self._label[_start: _start+_bsz])
        return enc_input, dec_input, label
Project: torch_light    Author: ne7ermore    | project source | file source
def _gaussian(self, enc_output):
        def latent_loss(mu, sigma):
            pow_mu = mu * mu
            pow_sigma = sigma * sigma
            return 0.5 * torch.mean(pow_mu + pow_sigma - torch.log(pow_sigma) - 1)

        mu = self._enc_mu(enc_output)
        sigma = torch.exp(.5 * self._enc_log_sigma(enc_output))
        self.latent_loss = latent_loss(mu, sigma)

        weight = next(self.parameters()).data
        std_z = Variable(weight.new(*sigma.size()), requires_grad=False)
        std_z.data.copy_(torch.from_numpy(
                np.random.normal(size=sigma.size())))

        return mu + sigma * std_z
Project: torch_light    Author: ne7ermore    | project source | file source
def get_batch(self, i, evaluation=False):
        def pad_to_longest(insts, max_len):
            inst_data = np.array([inst + [const.PAD] * (max_len - len(inst)) for inst in insts])
            inst_data_tensor = Variable(torch.from_numpy(inst_data), volatile=evaluation)
            if self.cuda:
                inst_data_tensor = inst_data_tensor.cuda()
            return inst_data_tensor

        bsz = min(self._batch_size, self._sents_size-1-i)

        src = pad_to_longest(self._src_sents[i:i+bsz], self._max_src)
        tgt = pad_to_longest(self._tgt_sents[i:i+bsz], self._max_tgt)
        label = Variable(torch.from_numpy(self._label[i:i+bsz]), volatile=evaluation)
        if self.cuda:
            label = label.cuda()

        return src, tgt, label
Project: Pytorch-Sketch-RNN    Author: alexis-jacq    | project source | file source
def make_batch(batch_size):
    batch_idx = np.random.choice(len(data),batch_size)
    batch_sequences = [data[idx] for idx in batch_idx]
    strokes = []
    lengths = []
    indice = 0
    for seq in batch_sequences:
        len_seq = len(seq[:,0])
        new_seq = np.zeros((Nmax,5))
        new_seq[:len_seq,:2] = seq[:,:2]
        new_seq[:len_seq-1,2] = 1-seq[:-1,2]
        new_seq[:len_seq,3] = seq[:,2]
        new_seq[(len_seq-1):,4] = 1
        new_seq[len_seq-1,2:4] = 0
        lengths.append(len(seq[:,0]))
        strokes.append(new_seq)
        indice += 1

    if use_cuda:
        batch = Variable(torch.from_numpy(np.stack(strokes,1)).cuda().float())
    else:
        batch = Variable(torch.from_numpy(np.stack(strokes,1)).float())
    return batch, lengths

################################ adaptive lr
Project: pytorch-siamese    Author: delijati    | project source | file source
def setUp(self):
        self.x0 = torch.from_numpy(
            # np.array(
            #     [[0.39834601, 0.6656751], [-0.44211167, -0.95197892],
            #      [0.52718359, 0.69099563], [-0.36314946, -0.07625845],
            #      [-0.53021497, -0.67317766]],
            #     dtype=np.float32)
            np.random.uniform(-1, 1, (5, 2)).astype(np.float32)
        )
        self.x1 = torch.from_numpy(
            # np.array(
            #     [[0.73587674, 0.98970324], [-0.9245277, 0.93210953],
            #      [-0.32989913, 0.36705822], [0.25636896, 0.10106555],
            #      [-0.11412049, 0.80171216]],
            #     dtype=np.float32)
            np.random.uniform(-1, 1, (5, 2)).astype(np.float32)
        )
        self.t = torch.from_numpy(
            # np.array(
            #     [1, 0, 1, 1, 0], dtype=np.float32)
            np.random.randint(0, 2, (5,)).astype(np.float32)
        )
        self.margin = 1
Project: audio    Author: pytorch    | project source | file source
def __call__(self, tensor):
        """

        Args:
            tensor (Tensor): Tensor of audio of size (samples x channels)

        Returns:
            tensor (Tensor): n_mels x hops x channels (BxLxC), where n_mels is
                the number of mel bins, hops is the number of hops, and channels
                is unchanged.

        """
        if librosa is None:
            print("librosa not installed, cannot create spectrograms")
            return tensor
        L = []
        for i in range(tensor.size(1)):
            nparr = tensor[:, i].numpy()  # (samples, )
            sgram = librosa.feature.melspectrogram(
                nparr, **self.kwargs)  # (n_mels, hops)
            L.append(sgram)
        L = np.stack(L, 2)  # (n_mels, hops, channels)
        tensor = torch.from_numpy(L).type_as(tensor)

        return tensor
Project: MIL.pytorch    Author: gujiuxiang    | project source | file source
def test_img(im, net, base_image_size, means):
    """
    Calls Caffe to get output for this image
    """
    batch_size = 1
    # Resize image
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= means

    im, gr, grr = upsample_image(im_orig, base_image_size)
    im = np.transpose(im, axes=(2, 0, 1))
    im = im[np.newaxis, :, :, :]

    # Pass into model
    mil_prob = net(Variable(torch.from_numpy(im), requires_grad=False).cuda())
    return mil_prob
Project: inferno    Author: inferno-pytorch    | project source | file source
def setUpDatasets(self):
        # Build training dataset
        inputs, targets = self.generate_random_data(self.NUM_SAMPLES, (3, 32, 32),
                                                    num_classes=self.NUM_CLASSES,
                                                    dtype='float32')
        # Split into train and validation sets
        train_inputs, train_targets = inputs[:self.NUM_TRAINING_SAMPLES], \
                                      targets[:self.NUM_TRAINING_SAMPLES]
        validate_inputs, validate_targets = inputs[self.NUM_TRAINING_SAMPLES:], \
                                            targets[self.NUM_TRAINING_SAMPLES:]
        # Convert to tensor and build dataset
        train_dataset = TensorDataset(torch.from_numpy(train_inputs),
                                      torch.from_numpy(train_targets))
        validate_dataset = TensorDataset(torch.from_numpy(validate_inputs),
                                         torch.from_numpy(validate_targets))
        # Build dataloaders from dataset
        self.train_loader = DataLoader(train_dataset, batch_size=16,
                                       shuffle=True, num_workers=2, pin_memory=False)
        self.validate_loader = DataLoader(validate_dataset, batch_size=16,
                                          shuffle=True, num_workers=2, pin_memory=False)
Project: jack    Author: uclmr    | project source | file source
def create_torch_variable(self, value, gpu=False):
        """Convenience method that produces a tensor given the value of the defined type.

        Returns: a torch tensor of same type.
        """
        if isinstance(value, torch.autograd.Variable):
            if gpu:
                value = value.cuda()
            return value
        if not torch.is_tensor(value):
            if not isinstance(value, np.ndarray):
                value = np.array(value, dtype=self.dtype.as_numpy_dtype)
            else:
                value = value.astype(self.dtype.as_numpy_dtype)
            if value.size == 0:
                return value
            allowed = [tf.int16, tf.int32, tf.int64, tf.float16, tf.float32, tf.float64, tf.int8]
            if self.dtype in allowed:
                value = torch.autograd.Variable(torch.from_numpy(value))
        else:
            value = torch.autograd.Variable(value)
        if gpu and isinstance(value, torch.autograd.Variable):
            value = value.cuda()
        return value
Project: chinese_generation    Author: polaroidz    | project source | file source
def batch_generator(batch_size, nb_batches):
    batch_count = 0

    while True:
        pos = batch_count * batch_size
        batch = dataset[pos:pos+batch_size]

        X = np.zeros((batch_size, 1, img_size, img_size), dtype=np.float32)

        for k, path in enumerate(batch):
            im = io.imread(path)
            im = color.rgb2gray(im)

            X[k] = im[np.newaxis, ...]

        X = torch.from_numpy(X)
        X = Variable(X)

        yield X, batch

        batch_count += 1

        if batch_count > nb_batches:
            batch_count = 0
Project: pytorch-smoothgrad    Author: pkdn    | project source | file source
def preprocess_image(img, cuda=False):
    means=[0.485, 0.456, 0.406]
    stds=[0.229, 0.224, 0.225]

    preprocessed_img = img.copy()[: , :, ::-1]
    for i in range(3):
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] - means[i]
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] / stds[i]
    preprocessed_img = \
        np.ascontiguousarray(np.transpose(preprocessed_img, (2, 0, 1)))
    preprocessed_img = torch.from_numpy(preprocessed_img)
    preprocessed_img.unsqueeze_(0)
    if cuda:
        preprocessed_img = Variable(preprocessed_img.cuda(), requires_grad=True)
    else:
        preprocessed_img = Variable(preprocessed_img, requires_grad=True)

    return preprocessed_img
Project: pytorch-smoothgrad    Author: pkdn    | project source | file source
def __call__(self, x, index=None):
        output = self.pretrained_model(x)

        if index is None:
            index = np.argmax(output.data.cpu().numpy())

        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][index] = 1
        if self.cuda:
            one_hot = Variable(torch.from_numpy(one_hot).cuda(), requires_grad=True)
        else:
            one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
        one_hot = torch.sum(one_hot * output)

        one_hot.backward(retain_variables=True)

        grad = x.grad.data.cpu().numpy()
        grad = grad[0, :, :, :]

        return grad
Project: MMdnn    Author: Microsoft    | project source | file source
def _layer_BatchNorm(self):
        self.add_body(0, """
    @staticmethod
    def __batch_normalization(dim, name, **kwargs):
        if   dim == 1:  layer = nn.BatchNorm1d(**kwargs)
        elif dim == 2:  layer = nn.BatchNorm2d(**kwargs)
        elif dim == 3:  layer = nn.BatchNorm3d(**kwargs)
        else:           raise NotImplementedError()

        if 'scale' in __weights_dict[name]:
            layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
        else:
            layer.weight.data.fill_(1)

        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        else:
            layer.bias.data.fill_(0)

        layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
        layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
        return layer""")
Project: textobjdetection    Author: andfoy    | project source | file source
def pull_item(self, index):
        img_id = self.ids[index]

        target = ET.parse(self._annopath % img_id).getroot()
        img = cv2.imread(self._imgpath % img_id)
        height, width, channels = img.shape

        if self.target_transform is not None:
            target = self.target_transform(target, width, height)

        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
            # to rgb
            img = img[:, :, (2, 1, 0)]
            # img = img.transpose(2, 0, 1)
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        return torch.from_numpy(img).permute(2, 0, 1), target, height, width
        # return torch.from_numpy(img), target, height, width
Project: pytorch-es    Author: atgambardella    | project source | file source
def perturb_model(args, model, random_seed, env):
    """
    Modifies the given model with a perturbation of its parameters,
    as well as the negative perturbation, and returns both perturbed
    models.
    """
    new_model = ES(env.observation_space.shape[0],
                   env.action_space, args.small_net)
    anti_model = ES(env.observation_space.shape[0],
                    env.action_space, args.small_net)
    new_model.load_state_dict(model.state_dict())
    anti_model.load_state_dict(model.state_dict())
    np.random.seed(random_seed)
    for (k, v), (anti_k, anti_v) in zip(new_model.es_params(),
                                        anti_model.es_params()):
        eps = np.random.normal(0, 1, v.size())
        v += torch.from_numpy(args.sigma*eps).float()
        anti_v += torch.from_numpy(args.sigma*-eps).float()
    return [new_model, anti_model]
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo    | project source | file source
def __init__(self, args):
        super(GRU, self).__init__()
        self.args = args
        # print(args)

        self.hidden_dim = args.lstm_hidden_dim
        self.num_layers = args.lstm_num_layers
        V = args.embed_num
        D = args.embed_dim
        C = args.class_num
        # self.embed = nn.Embedding(V, D, max_norm=args.max_norm)
        self.embed = nn.Embedding(V, D)
        # word embedding
        if args.word_Embedding:
            pretrained_weight = np.array(args.pretrained_weight)
            self.embed.weight.data.copy_(torch.from_numpy(pretrained_weight))
        # gru
        self.gru = nn.GRU(D, self.hidden_dim, dropout=args.dropout, num_layers=self.num_layers)
        # linear
        self.hidden2label = nn.Linear(self.hidden_dim, C)
        # hidden
        self.hidden = self.init_hidden(self.num_layers, args.batch_size)
        # dropout
        self.dropout = nn.Dropout(args.dropout)
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo    | project source | file source
def __init__(self, args):
        super(CNN_Text,self).__init__()
        self.args = args

        V = args.embed_num
        D = args.embed_dim
        C = args.class_num
        Ci = 1
        Co = args.kernel_num
        Ks = args.kernel_sizes

        self.embed = nn.Embedding(V, D)
        # print("aaaaaaaa", self.embed.weight)
        pretrained_weight = np.array(args.pretrained_weight)
        self.embed.weight.data.copy_(torch.from_numpy(pretrained_weight))
        # print("bbbbbbbb", self.embed.weight)

        self.convs1 = [nn.Conv2d(Ci, Co, (K, D)) for K in Ks]
        '''
        self.conv13 = nn.Conv2d(Ci, Co, (3, D))
        self.conv14 = nn.Conv2d(Ci, Co, (4, D))
        self.conv15 = nn.Conv2d(Ci, Co, (5, D))
        '''
        self.dropout = nn.Dropout(args.dropout)
        self.fc1 = nn.Linear(len(Ks)*Co, C)
Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch    Author: bamtercelboo    | project source | file source
def __init__(self, args):
        super(BiGRU, self).__init__()
        self.args = args
        # print(args)

        self.hidden_dim = args.lstm_hidden_dim
        self.num_layers = args.lstm_num_layers
        V = args.embed_num
        D = args.embed_dim
        C = args.class_num
        # self.embed = nn.Embedding(V, D, max_norm=args.max_norm)
        self.embed = nn.Embedding(V, D)
        # word embedding
        if args.word_Embedding:
            pretrained_weight = np.array(args.pretrained_weight)
            self.embed.weight.data.copy_(torch.from_numpy(pretrained_weight))
        # gru
        self.bigru = nn.GRU(D, self.hidden_dim, dropout=args.dropout, num_layers=self.num_layers, bidirectional=True)
        # linear
        self.hidden2label = nn.Linear(self.hidden_dim * 2, C)
        # hidden
        self.hidden = self.init_hidden(self.num_layers, args.batch_size)
        # dropout
        self.dropout = nn.Dropout(args.dropout)
Project: arc-pytorch    Author: sanyam5    | project source | file source
def fetch_batch(self, part, batch_size: int = None):

        if batch_size is None:
            batch_size = self.batch_size

        X, Y = self._fetch_batch(part, batch_size)

        X = Variable(torch.from_numpy(X)).view(2*batch_size, self.image_size, self.image_size)

        X1 = X[:batch_size]  # (B, h, w)
        X2 = X[batch_size:]  # (B, h, w)

        X = torch.stack([X1, X2], dim=1)  # (B, 2, h, w)

        Y = Variable(torch.from_numpy(Y))

        if use_cuda:
            X, Y = X.cuda(), Y.cuda()

        return X, Y
Project: LSUV-pytorch    Author: ducha-aiki    | project source | file source
def orthogonal_weights_init(m):
    if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
        if hasattr(m, 'weight_v'):
            w_ortho = svd_orthonormal(m.weight_v.data.cpu().numpy())
            m.weight_v.data = torch.from_numpy(w_ortho)
            try:
                nn.init.constant(m.bias, 0)
            except:
                pass
        else:
            #nn.init.orthogonal(m.weight)
            w_ortho = svd_orthonormal(m.weight.data.cpu().numpy())
            #print w_ortho 
            #m.weight.data.copy_(torch.from_numpy(w_ortho))
            m.weight.data = torch.from_numpy(w_ortho)
            try:
                nn.init.constant(m.bias, 0)
            except:
                pass
    return
Project: drl.pth    Author: seba-1511    | project source | file source
def _variable(self, state):
        state = th.from_numpy(state).float()
        if len(state.size()) < 2:
            state = state.unsqueeze(0)
        return V(state)