Python torch module: randperm() usage examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use torch.randperm().
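
Before the excerpts, a minimal self-contained sketch of the function itself: torch.randperm(n) returns a random permutation of the integers 0..n-1 as a LongTensor, most often used to shuffle data via indexing (the values below are illustrative):

import torch

torch.manual_seed(0)                 # fix the RNG so the permutation is reproducible
perm = torch.randperm(5)             # a random permutation of 0..4, e.g. tensor([4, 0, 3, 2, 1])
data = torch.arange(10, 15)
shuffled = data[perm]                # advanced indexing applies the permutation
print(perm, shuffled)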

Project: efficient_densenet_pytorch | Author: gpleiss
def _make_dataloaders(train_set, valid_set, test_set, train_size, valid_size, batch_size):
    # Split training into train and validation
    indices = torch.randperm(len(train_set))
    train_indices = indices[:len(indices)-valid_size][:train_size or None]
    valid_indices = indices[len(indices)-valid_size:] if valid_size else None

    train_loader = torch.utils.data.DataLoader(train_set, pin_memory=True, batch_size=batch_size,
                                               sampler=SubsetRandomSampler(train_indices))
    test_loader = torch.utils.data.DataLoader(test_set, pin_memory=True, batch_size=batch_size)
    if valid_size:
        valid_loader = torch.utils.data.DataLoader(valid_set, pin_memory=True, batch_size=batch_size,
                                                   sampler=SubsetRandomSampler(valid_indices))
    else:
        valid_loader = None

    return train_loader, valid_loader, test_loader
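
A hypothetical invocation of the helper above, assuming it sits in a module that also does `from torch.utils.data.sampler import SubsetRandomSampler`; the toy datasets are made up for illustration:

import torch
from torch.utils.data import TensorDataset

train_set = TensorDataset(torch.randn(100, 3), torch.randperm(100))
test_set = TensorDataset(torch.randn(20, 3), torch.randperm(20))
# valid_set shares the underlying data; the helper splits off validation *indices*
train_loader, valid_loader, test_loader = _make_dataloaders(
    train_set, train_set, test_set,
    train_size=None, valid_size=10, batch_size=16)
print(len(train_loader.sampler), len(valid_loader.sampler))   # 90 and 10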
Project: treelstm.pytorch | Author: dasguptar
def train(self, dataset):
        self.model.train()
        self.optimizer.zero_grad()
        total_loss = 0.0
        indices = torch.randperm(len(dataset))
        for idx in tqdm(range(len(dataset)), desc='Training epoch ' + str(self.epoch + 1)):
            ltree, lsent, rtree, rsent, label = dataset[indices[idx]]
            linput, rinput = Var(lsent), Var(rsent)
            target = Var(map_label_to_target(label, dataset.num_classes))
            if self.args.cuda:
                linput, rinput = linput.cuda(), rinput.cuda()
                target = target.cuda()
            output = self.model(ltree, linput, rtree, rinput)
            loss = self.criterion(output, target)
            total_loss += loss.data[0]
            loss.backward()
            if idx % self.args.batchsize == 0 and idx > 0:
                self.optimizer.step()
                self.optimizer.zero_grad()
        self.epoch += 1
        return total_loss / len(dataset)

    # helper function for testing
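
Two notes on this excerpt: `loss.data[0]` is the pre-0.4 way to read a scalar loss (on PyTorch 0.4+ it is `loss.item()`), and the `idx % batchsize` test implements gradient accumulation. A condensed, self-contained sketch of that accumulation pattern, with a placeholder model in place of the TreeLSTM:

import torch
import torch.nn as nn

model = nn.Linear(4, 1)                              # placeholder for the real model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
data, target = torch.randn(32, 4), torch.randn(32, 1)
batchsize = 8

optimizer.zero_grad()
for step, idx in enumerate(torch.randperm(len(data)), 1):
    i = int(idx)                                     # 0-dim LongTensor -> Python int
    loss = nn.functional.mse_loss(model(data[i:i+1]), target[i:i+1])
    loss.backward()                                  # gradients accumulate across samples
    if step % batchsize == 0:
        optimizer.step()                             # apply the accumulated gradient
        optimizer.zero_grad()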
Project: pytorch-dist | Author: apaszke
def random(nin, nout, nto):
            nker = nto * nout
            tbl = torch.Tensor(nker, 2)
            fi = torch.randperm(nin)
            frcntr = 0
            nfi = math.floor(nin / nto) # number of distinct nto chunks
            totbl = tbl.select(1, 1)
            frtbl = tbl.select(1, 0)
            fitbl = fi.narrow(0, 0, (nfi * nto)) # part of fi that covers distinct chunks
            ufrtbl = frtbl.unfold(0, nto, nto)
            utotbl = totbl.unfold(0, nto, nto)
            ufitbl = fitbl.unfold(0, nto, nto)

            # start filling frtbl
            for i in range(nout): # for each unit in the target map
                ufrtbl.select(0, i).copy_(ufitbl.select(0, frcntr))
                frcntr += 1
                if frcntr-1 == nfi: # reset fi
                    fi.copy_(torch.randperm(nin))
                    frcntr = 1

            for tocntr in range(utotbl.size(0)):
                utotbl.select(0, tocntr).fill_(tocntr)

            return tbl
Project: pytorch-dist | Author: apaszke
def test_index_copy(self):
        num_copy, num_dest = 3, 20
        dest = torch.randn(num_dest, 4, 5)
        src = torch.randn(num_copy, 4, 5)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy).long()
        dest2 = dest.clone()
        dest.index_copy_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]].copy_(src[i])
        self.assertEqual(dest, dest2, 0)

        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy).long()
        dest2 = dest.clone()
        dest.index_copy_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]] = src[i]
        self.assertEqual(dest, dest2, 0)
Project: pytorch-dist | Author: apaszke
def test_index_add(self):
        num_copy, num_dest = 3, 3
        dest = torch.randn(num_dest, 4, 5)
        src = torch.randn(num_copy, 4, 5)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy).long()
        dest2 = dest.clone()
        dest.index_add_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]].add_(src[i])
        self.assertEqual(dest, dest2)

        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy).long()
        dest2 = dest.clone()
        dest.index_add_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]] = dest2[idx[i]] + src[i]
        self.assertEqual(dest, dest2)

    # Fill idx with valid indices.
Project: tnt | Author: pytorch
def resample(self, seed=None):
        """Resample the dataset.

        Args:
            seed (int, optional): Seed for resampling. By default no seed is
                used.
        """
        if seed is not None:
            gen = torch.manual_seed(seed)
        else:
            gen = torch.default_generator

        if self.replacement:
            self.perm = torch.LongTensor(len(self)).random_(
                len(self.dataset), generator=gen)
        else:
            self.perm = torch.randperm(
                len(self.dataset), generator=gen).narrow(0, 0, len(self))
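
For reference, the same seeded-permutation idea in a self-contained form, using an explicit torch.Generator (the seed value is arbitrary):

import torch

gen = torch.Generator()
gen.manual_seed(42)
perm_a = torch.randperm(10, generator=gen)
gen.manual_seed(42)                     # re-seed: the generator replays its stream
perm_b = torch.randperm(10, generator=gen)
assert torch.equal(perm_a, perm_b)      # same seed, same permutation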
Project: pytorch | Author: tylergenter
def random(nin, nout, nto):
            nker = nto * nout
            tbl = torch.Tensor(nker, 2)
            fi = torch.randperm(nin)
            frcntr = 0
            nfi = math.floor(nin / nto)  # number of distinct nto chunks
            totbl = tbl.select(1, 1)
            frtbl = tbl.select(1, 0)
            fitbl = fi.narrow(0, 0, (nfi * nto))  # part of fi that covers distinct chunks
            ufrtbl = frtbl.unfold(0, nto, nto)
            utotbl = totbl.unfold(0, nto, nto)
            ufitbl = fitbl.unfold(0, nto, nto)

            # start filling frtbl
            for i in range(nout):  # for each unit in the target map
                ufrtbl.select(0, i).copy_(ufitbl.select(0, frcntr))
                frcntr += 1
                if frcntr - 1 == nfi:  # reset fi
                    fi.copy_(torch.randperm(nin))
                    frcntr = 1

            for tocntr in range(utotbl.size(0)):
                utotbl.select(0, tocntr).fill_(tocntr)

            return tbl
Project: pytorch | Author: tylergenter
def test_index_copy(self):
        num_copy, num_dest = 3, 20
        dest = torch.randn(num_dest, 4, 5)
        src = torch.randn(num_copy, 4, 5)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_copy_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]].copy_(src[i])
        self.assertEqual(dest, dest2, 0)

        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_copy_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]] = src[i]
        self.assertEqual(dest, dest2, 0)
Project: pytorch | Author: tylergenter
def test_index_add(self):
        num_copy, num_dest = 3, 3
        dest = torch.randn(num_dest, 4, 5)
        src = torch.randn(num_copy, 4, 5)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_add_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]].add_(src[i])
        self.assertEqual(dest, dest2)

        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_add_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]] = dest2[idx[i]] + src[i]
        self.assertEqual(dest, dest2)

    # Fill idx with valid indices.
Project: pytorch-coriander | Author: hughperkins
def random(nin, nout, nto):
            nker = nto * nout
            tbl = torch.Tensor(nker, 2)
            fi = torch.randperm(nin)
            frcntr = 0
            nfi = math.floor(nin / nto)  # number of distinct nto chunks
            totbl = tbl.select(1, 1)
            frtbl = tbl.select(1, 0)
            fitbl = fi.narrow(0, 0, (nfi * nto))  # part of fi that covers distinct chunks
            ufrtbl = frtbl.unfold(0, nto, nto)
            utotbl = totbl.unfold(0, nto, nto)
            ufitbl = fitbl.unfold(0, nto, nto)

            # start filling frtbl
            for i in range(nout):  # for each unit in the target map
                ufrtbl.select(0, i).copy_(ufitbl.select(0, frcntr))
                frcntr += 1
                if frcntr - 1 == nfi:  # reset fi
                    fi.copy_(torch.randperm(nin))
                    frcntr = 1

            for tocntr in range(utotbl.size(0)):
                utotbl.select(0, tocntr).fill_(tocntr)

            return tbl
Project: pytorch-coriander | Author: hughperkins
def test_index_copy(self):
        num_copy, num_dest = 3, 20
        dest = torch.randn(num_dest, 4, 5)
        src = torch.randn(num_copy, 4, 5)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_copy_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]].copy_(src[i])
        self.assertEqual(dest, dest2, 0)

        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_copy_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]] = src[i]
        self.assertEqual(dest, dest2, 0)
Project: pytorch-coriander | Author: hughperkins
def test_index_add(self):
        num_copy, num_dest = 3, 3
        dest = torch.randn(num_dest, 4, 5)
        src = torch.randn(num_copy, 4, 5)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_add_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]].add_(src[i])
        self.assertEqual(dest, dest2)

        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_add_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]] = dest2[idx[i]] + src[i]
        self.assertEqual(dest, dest2)

    # Fill idx with valid indices.
Project: pytorch | Author: ezyang
def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = list(torch.randperm(len(self.dataset), generator=g))

        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)
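
This is the DistributedSampler pattern: every rank seeds a private generator with the epoch number, so all ranks draw the identical permutation and then keep disjoint slices of it. A minimal sketch of the sharding arithmetic, with made-up sizes:

import torch

dataset_len, num_replicas = 10, 3
num_samples = (dataset_len + num_replicas - 1) // num_replicas   # ceil division
total_size = num_samples * num_replicas

g = torch.Generator()
g.manual_seed(0)                                  # stands in for the epoch number
indices = list(torch.randperm(dataset_len, generator=g))
indices += indices[:total_size - len(indices)]    # pad so every rank gets num_samples
shards = [indices[r * num_samples:(r + 1) * num_samples] for r in range(num_replicas)]
assert sum(len(s) for s in shards) == total_size  # evenly sized shards, one per rank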
Project: pytorch | Author: ezyang
def random(nin, nout, nto):
            nker = nto * nout
            tbl = torch.Tensor(nker, 2)
            fi = torch.randperm(nin)
            frcntr = 0
            nfi = math.floor(nin / nto)  # number of distinct nto chunks
            totbl = tbl.select(1, 1)
            frtbl = tbl.select(1, 0)
            fitbl = fi.narrow(0, 0, (nfi * nto))  # part of fi that covers distinct chunks
            ufrtbl = frtbl.unfold(0, nto, nto)
            utotbl = totbl.unfold(0, nto, nto)
            ufitbl = fitbl.unfold(0, nto, nto)

            # start filling frtbl
            for i in range(nout):  # for each unit in the target map
                ufrtbl.select(0, i).copy_(ufitbl.select(0, frcntr))
                frcntr += 1
                if frcntr - 1 == nfi:  # reset fi
                    fi.copy_(torch.randperm(nin))
                    frcntr = 1

            for tocntr in range(utotbl.size(0)):
                utotbl.select(0, tocntr).fill_(tocntr)

            return tbl
Project: pytorch | Author: ezyang
def test_index_copy(self):
        num_copy, num_dest = 3, 20
        dest = torch.randn(num_dest, 4, 5)
        src = torch.randn(num_copy, 4, 5)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_copy_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]].copy_(src[i])
        self.assertEqual(dest, dest2, 0)

        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_copy_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]] = src[i]
        self.assertEqual(dest, dest2, 0)
Project: pytorch-avitm | Author: hyqneuron
def train():
    for epoch in range(args.num_epoch):
        all_indices = torch.randperm(tensor_tr.size(0)).split(args.batch_size)
        loss_epoch = 0.0
        model.train()                   # switch to training mode
        for batch_indices in all_indices:
            if not args.nogpu: batch_indices = batch_indices.cuda()
            input = Variable(tensor_tr[batch_indices])
            recon, loss = model(input, compute_loss=True)
            # optimize
            optimizer.zero_grad()       # clear previous gradients
            loss.backward()             # backprop
            optimizer.step()            # update parameters
            # report
            loss_epoch += loss.data[0]    # add loss to loss_epoch
        if epoch % 5 == 0:
            print('Epoch {}, loss={}'.format(epoch, loss_epoch / len(all_indices)))
Project: treelstm-pytorch | Author: pklfz
def train(self, dataset):
        self.model.train()
        self.optimizer.zero_grad()
        loss, k = 0.0, 0
        indices = torch.randperm(len(dataset))
        for idx in tqdm(range(len(dataset)), desc='Training epoch ' + str(self.epoch + 1)):
            ltree, lsent, rtree, rsent, label = dataset[indices[idx]]
            linput, rinput = Var(lsent), Var(rsent)
            target = Var(map_label_to_target(label, dataset.num_classes))
            if self.args.cuda:
                linput, rinput = linput.cuda(), rinput.cuda()
                target = target.cuda()
            output = self.model(ltree, linput, rtree, rinput)
            err = self.criterion(output, target)
            loss += err.data[0]
            err.backward()
            k += 1
            if k % self.args.batchsize == 0:
                self.optimizer.step()
                self.optimizer.zero_grad()
        self.epoch += 1
        return loss / len(dataset)

    # helper function for testing
Project: torchsample | Author: ncullen93
def __call__(self, *inputs):
        outputs = []
        for idx, _input in enumerate(inputs):
            size = _input.size()
            img_height = size[1]
            img_width = size[2]

            x_blocks = int(img_height/self.blocksize) # number of x blocks
            y_blocks = int(img_width/self.blocksize)
            ind = th.randperm(x_blocks*y_blocks)

            new = th.zeros(_input.size())
            count = 0
            for i in range(x_blocks):
                for j in range(y_blocks):
                    row = int(ind[count] / x_blocks)
                    column = ind[count] % x_blocks
                    new[:, i*self.blocksize:(i+1)*self.blocksize, j*self.blocksize:(j+1)*self.blocksize] = \
                    _input[:, row*self.blocksize:(row+1)*self.blocksize, column*self.blocksize:(column+1)*self.blocksize]
                    count += 1
            outputs.append(new)
        return outputs if len(inputs) > 1 else outputs[0]
Project: pytorch | Author: pytorch
def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = list(torch.randperm(len(self.dataset), generator=g))

        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)
Project: pytorch | Author: pytorch
def random(nin, nout, nto):
            nker = nto * nout
            tbl = torch.Tensor(nker, 2)
            fi = torch.randperm(nin)
            frcntr = 0
            nfi = math.floor(nin / nto)  # number of distinct nto chunks
            totbl = tbl.select(1, 1)
            frtbl = tbl.select(1, 0)
            fitbl = fi.narrow(0, 0, (nfi * nto))  # part of fi that covers distinct chunks
            ufrtbl = frtbl.unfold(0, nto, nto)
            utotbl = totbl.unfold(0, nto, nto)
            ufitbl = fitbl.unfold(0, nto, nto)

            # start filling frtbl
            for i in range(nout):  # for each unit in the target map
                ufrtbl.select(0, i).copy_(ufitbl.select(0, frcntr))
                frcntr += 1
                if frcntr - 1 == nfi:  # reset fi
                    fi.copy_(torch.randperm(nin))
                    frcntr = 1

            for tocntr in range(utotbl.size(0)):
                utotbl.select(0, tocntr).fill_(tocntr)

            return tbl
Project: pytorch | Author: pytorch
def test_index_copy(self):
        num_copy, num_dest = 3, 20
        dest = torch.randn(num_dest, 4, 5)
        src = torch.randn(num_copy, 4, 5)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_copy_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]].copy_(src[i])
        self.assertEqual(dest, dest2, 0)

        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_copy_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]] = src[i]
        self.assertEqual(dest, dest2, 0)
Project: pytorch | Author: pytorch
def test_index_add(self):
        num_copy, num_dest = 3, 3
        dest = torch.randn(num_dest, 4, 5)
        src = torch.randn(num_copy, 4, 5)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_add_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]].add_(src[i])
        self.assertEqual(dest, dest2)

        dest = torch.randn(num_dest)
        src = torch.randn(num_copy)
        idx = torch.randperm(num_dest).narrow(0, 0, num_copy)
        dest2 = dest.clone()
        dest.index_add_(0, idx, src)
        for i in range(idx.size(0)):
            dest2[idx[i]] = dest2[idx[i]] + src[i]
        self.assertEqual(dest, dest2)
Project: verb-attributes | Author: uwnlp
def cosine_ranking_loss(input_data, ctx, margin=0.1):
    """
    :param input_data: [batch_size, 300] tensor of predictions
    :param ctx: [batch_size, 300] tensor of ground truths
    :param margin: required gap between the correct and shuffled cosine similarities
    :return: (cost, correct_contrib, incorrect_contrib)
    """
    normed = _normalize(input_data)
    ctx_normed = _normalize(ctx)
    shuff_inds = torch.randperm(normed.size(0))
    if ctx.is_cuda:
        shuff_inds = shuff_inds.cuda()
    shuff = ctx_normed[shuff_inds]

    correct_contrib = torch.sum(normed * ctx_normed, 1).squeeze()
    incorrect_contrib = torch.sum(normed * shuff, 1).squeeze()

    # similarity = torch.mm(normed, ctx_normed.t()) #[predictions, gts]
    # correct_contrib = similarity.diag()
    # incorrect_contrib = incorrect_contrib.sum(1).squeeze()/(incorrect_contrib.size(1)-1.0)
    #
    cost = (margin + incorrect_contrib - correct_contrib).clamp(min=0)

    return cost, correct_contrib, incorrect_contrib
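
A hypothetical call, assuming `_normalize` (defined elsewhere in the project) performs row-wise L2 normalisation; the sketch supplies a stand-in so it runs on its own:

import torch

def _normalize(x, eps=1e-8):             # stand-in for the project's helper
    return x / (x.norm(p=2, dim=1, keepdim=True) + eps)

pred, gt = torch.randn(4, 300), torch.randn(4, 300)
cost, correct, incorrect = cosine_ranking_loss(pred, gt, margin=0.1)
print(cost.shape)                         # one hinge value per batch element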
Project: TreeLSTMSentiment | Author: ttpro1995
def train(self, dataset):
        self.model.train()
        self.optimizer.zero_grad()
        loss, k = 0.0, 0
        indices = torch.randperm(len(dataset))
        for idx in tqdm(range(len(dataset)), desc='Training epoch '+str(self.epoch+1)):
            ltree,lsent,rtree,rsent,label = dataset[indices[idx]]
            linput, rinput = Var(lsent), Var(rsent)
            target = Var(map_label_to_target(label,dataset.num_classes))
            if self.args.cuda:
                linput, rinput = linput.cuda(), rinput.cuda()
                target = target.cuda()
            output = self.model(ltree,linput,rtree,rinput)
            err = self.criterion(output, target)
            loss += err.data[0]
            err.backward()
            k += 1
            if k%self.args.batchsize==0:
                self.optimizer.step()
                self.optimizer.zero_grad()
        self.epoch += 1
        return loss/len(dataset)

    # helper function for testing
Project: convNet.pytorch | Author: eladhoffer
def __call__(self, img):
        if self.transforms is None:
            return img
        order = torch.randperm(len(self.transforms))
        for i in order:
            img = self.transforms[i](img)
        return img
Project: pytorch-dist | Author: apaszke
def __iter__(self):
        return iter(torch.randperm(self.num_samples).long())
Project: pytorch-dist | Author: apaszke
def __iter__(self):
        for i in range(10):
            yield torch.randn(2, 10), torch.randperm(10)[:2]
Project: pytorch-dist | Author: apaszke
def test_len(self):
        source = TensorDataset(torch.randn(15, 10, 2, 3, 4, 5), torch.randperm(15))
        self.assertEqual(len(source), 15)
Project: pytorch-dist | Author: apaszke
def setUp(self):
        self.data = torch.randn(100, 2, 3, 5)
        self.labels = torch.randperm(50).repeat(2)
        self.dataset = TensorDataset(self.data, self.labels)
Project: pytorch-dist | Author: apaszke
def _fill_indices(self, idx, dim, dim_size, elems_per_row, m, n, o):
        for i in range(1 if dim == 0 else m):
            for j in range(1 if dim == 1 else n):
                for k in range(1 if dim == 2 else o):
                    ii = [i, j, k]
                    ii[dim] = slice(0, idx.size(dim)+1)
                    idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row]
Project: pytorch-dist | Author: apaszke
def test_permute(self):
        orig = [1, 2, 3, 4, 5, 6, 7]
        perm = list(torch.randperm(7).long())
        x = torch.Tensor(*orig).fill_(0)
        new = list(map(lambda x: x - 1, x.permute(*perm).size()))
        self.assertEqual(perm, new)
        self.assertEqual(x.size(), orig)
Project: pyro | Author: uber
def main():
    parser = argparse.ArgumentParser(description="parse args")
    parser.add_argument('-n', '--num-epochs', default=1000, type=int)
    parser.add_argument('-b', '--batch-size', default=N, type=int)
    parser.add_argument('--cuda', action='store_true')
    args = parser.parse_args()
    data = build_linear_dataset(N, p)
    if args.cuda:
        # make tensors and modules CUDA
        data = data.cuda()
        softplus.cuda()
        regression_model.cuda()
    for j in range(args.num_epochs):
        if args.batch_size == N:
            # use the entire data set
            epoch_loss = svi.step(data)
        else:
            # mini batch
            epoch_loss = 0.0
            perm = torch.randperm(N) if not args.cuda else torch.randperm(N).cuda()
            # shuffle data
            data = data[perm]
            # get indices of each batch
            all_batches = get_batch_indices(N, args.batch_size)
            for ix, batch_start in enumerate(all_batches[:-1]):
                batch_end = all_batches[ix + 1]
                batch_data = data[batch_start: batch_end]
                epoch_loss += svi.step(batch_data)
        if j % 100 == 0:
            print("epoch avg loss {}".format(epoch_loss/float(N)))
Project: pyro | Author: uber
def set_model_permutations(self):
        self.model_permutations = []
        self.model_unpermutations = []
        for n in range(1, self.N):
            permutation = list(range(2 ** (n - 1)))
            if n > 1:
                while permutation == list(range(2 ** (n - 1))):
                    permutation = torch.randperm(2 ** (n - 1)).numpy().tolist()
            self.model_permutations.append(permutation)

            unpermutation = list(range(len(permutation)))
            for i in range(len(permutation)):
                unpermutation[permutation[i]] = i
            self.model_unpermutations.append(unpermutation)
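
The unpermutation loop above builds the inverse permutation by hand; with tensors, the same inverse can be obtained from torch.argsort, as this short equivalence check shows:

import torch

perm = torch.randperm(8)
inv = torch.argsort(perm)                         # inv[perm[i]] == i
assert torch.equal(perm[inv], torch.arange(8))    # applying then inverting is the identity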
Project: pyro | Author: uber
def __init__(self, input_dim, hidden_dim, output_dim_multiplier=1,
                 mask_encoding=None, permutation=None):
        super(AutoRegressiveNN, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim_multiplier = output_dim_multiplier

        if mask_encoding is None:
            # the dependency structure is chosen at random
            self.mask_encoding = 1 + torch_multinomial(torch.ones(input_dim - 1) / (input_dim - 1),
                                                       num_samples=hidden_dim, replacement=True)
        else:
            # the dependency structure is given by the user
            self.mask_encoding = mask_encoding

        if permutation is None:
            # a permutation is chosen at random
            self.permutation = torch.randperm(input_dim)
        else:
            # the permutation is chosen by the user
            self.permutation = permutation

        # these masks control the autoregressive structure
        self.mask1 = Variable(torch.zeros(hidden_dim, input_dim))
        self.mask2 = Variable(torch.zeros(input_dim * self.output_dim_multiplier, hidden_dim))

        for k in range(hidden_dim):
            # fill in mask1
            m_k = self.mask_encoding[k]
            slice_k = torch.cat([torch.ones(m_k), torch.zeros(input_dim - m_k)])
            for j in range(input_dim):
                self.mask1[k, self.permutation[j]] = slice_k[j]
            # fill in mask2
            slice_k = torch.cat([torch.zeros(m_k), torch.ones(input_dim - m_k)])
            for r in range(self.output_dim_multiplier):
                for j in range(input_dim):
                    self.mask2[r * input_dim + self.permutation[j], k] = slice_k[j]

        self.lin1 = MaskedLinear(input_dim, hidden_dim, self.mask1)
        self.lin2 = MaskedLinear(hidden_dim, input_dim * output_dim_multiplier, self.mask2)
        self.relu = nn.ReLU()
Project: pyro | Author: uber
def sample(self):
        """
        :returns: a random subsample of `range(size)`
        :rtype: torch.autograd.Variable of torch.LongTensor
        """
        subsample_size = self.subsample_size
        if subsample_size is None or subsample_size > self.size:
            subsample_size = self.size
        if subsample_size == self.size:
            result = Variable(torch.LongTensor(list(range(self.size))))
        else:
            result = Variable(torch.randperm(self.size)[:self.subsample_size])
        return result.cuda() if self.use_cuda else result
Project: ExperimentPackage_PyTorch | Author: ICEORY
def __call__(self, img):
        if self.transforms is None:
            return img
        order = torch.randperm(len(self.transforms))
        for i in order:
            img = self.transforms[i](img)
        return img
Project: pytorch-a2c-ppo-acktr | Author: ikostrikov
def recurrent_generator(self, advantages, num_mini_batch):
        num_processes = self.rewards.size(1)
        num_envs_per_batch = num_processes // num_mini_batch
        perm = torch.randperm(num_processes)
        for start_ind in range(0, num_processes, num_envs_per_batch):
            observations_batch = []
            states_batch = []
            actions_batch = []
            return_batch = []
            masks_batch = []
            old_action_log_probs_batch = []
            adv_targ = []

            for offset in range(num_envs_per_batch):
                ind = perm[start_ind + offset]
                observations_batch.append(self.observations[:-1, ind])
                states_batch.append(self.states[0:1, ind])
                actions_batch.append(self.actions[:, ind])
                return_batch.append(self.returns[:-1, ind])
                masks_batch.append(self.masks[:-1, ind])
                old_action_log_probs_batch.append(self.action_log_probs[:, ind])
                adv_targ.append(advantages[:, ind])

            observations_batch = torch.cat(observations_batch, 0)
            states_batch = torch.cat(states_batch, 0)
            actions_batch = torch.cat(actions_batch, 0)
            return_batch = torch.cat(return_batch, 0)
            masks_batch = torch.cat(masks_batch, 0)
            old_action_log_probs_batch = torch.cat(old_action_log_probs_batch, 0)
            adv_targ = torch.cat(adv_targ, 0)

            yield observations_batch, states_batch, actions_batch, \
                return_batch, masks_batch, old_action_log_probs_batch, adv_targ
Project: open-reid | Author: Cysu
def __iter__(self):
        indices = torch.randperm(self.num_samples)
        ret = []
        for i in indices:
            pid = self.pids[i]
            t = self.index_dic[pid]
            if len(t) >= self.num_instances:
                t = np.random.choice(t, size=self.num_instances, replace=False)
            else:
                t = np.random.choice(t, size=self.num_instances, replace=True)
            ret.extend(t)
        return iter(ret)
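
This sampler mixes torch.randperm (to shuffle identities) with np.random.choice (to draw a fixed number of instances per identity). A condensed, self-contained sketch with made-up identity data:

import numpy as np
import torch

index_dic = {0: [0, 1, 2], 1: [3, 4], 2: [5, 6, 7, 8]}   # pid -> image indices
pids = list(index_dic.keys())
num_instances = 2

ret = []
for i in torch.randperm(len(pids)):
    t = index_dic[pids[int(i)]]
    replace = len(t) < num_instances      # only sample with replacement when forced to
    ret.extend(np.random.choice(t, size=num_instances, replace=replace))
print(ret)                                 # num_instances entries per identity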
Project: treehopper | Author: tomekkorbak
def train(self, dataset):
        self.model.train()
        self.embedding_model.train()
        self.embedding_model.zero_grad()
        self.optimizer.zero_grad()
        loss, k = 0.0, 0
        # torch.manual_seed(789)
        indices = torch.randperm(len(dataset))
        for idx in tqdm(range(len(dataset)), desc='Training epoch '+str(self.epoch+1)):
            tree, sent, label = dataset[indices[idx]]
            input = Var(sent)
            target = Var(torch.LongTensor([int(label)]))
            if self.args.cuda:
                input = input.cuda()
                target = target.cuda()
            emb = F.torch.unsqueeze(self.embedding_model(input), 1)
            output, err, _, _ = self.model.forward(tree, emb, training=True)
            #params = self.model.childsumtreelstm.getParameters()
            # params_norm = params.norm()
            err = err/self.args.batchsize # + 0.5*self.args.reg*params_norm*params_norm # custom bias
            loss += err.data[0] #
            err.backward()
            k += 1
            if k==self.args.batchsize:
                for f in self.embedding_model.parameters():
                    f.data.sub_(f.grad.data * self.args.emblr)
                self.optimizer.step()
                self.embedding_model.zero_grad()
                self.optimizer.zero_grad()
                k = 0
        self.epoch += 1
        return loss/len(dataset)

    # helper function for testing
Project: AoAReader | Author: kevinkwl
def shuffle(self):
        data = list(zip(self.documents, self.querys, self.candidates, self.answers))
        self.documents, self.querys, self.candidates, self.answers = zip(*[data[i] for i in torch.randperm(len(data))])
Project: bigBatch | Author: eladhoffer
def __call__(self, img):
        if self.transforms is None:
            return img
        order = torch.randperm(len(self.transforms))
        for i in order:
            img = self.transforms[i](img)
        return img
Project: NeuralMT | Author: hlt-mt
def _prepare_corpora(self, corpora, bpe_encoder, src_vocab, trg_vocab):
        src, trg = [], []
        sizes = []
        count, ignored = 0, 0

        for corpus in corpora:
            with corpus.reader([self._source_lang, self._target_lang]) as reader:
                for source, target in reader:
                    src_words = bpe_encoder.encode_line(source, is_source=True)
                    trg_words = bpe_encoder.encode_line(target, is_source=False)

                    if len(src_words) > 0 and len(trg_words) > 0:
                        src.append(src_vocab.convertToIdx(src_words,
                                                          onmt.Constants.UNK_WORD))
                        trg.append(trg_vocab.convertToIdx(trg_words,
                                                          onmt.Constants.UNK_WORD,
                                                          onmt.Constants.BOS_WORD,
                                                          onmt.Constants.EOS_WORD))
                        sizes.append(len(src_words))
                    else:
                        ignored += 1

                    count += 1
                    if count % 100000 == 0:
                        self._logger.info(' %d sentences prepared' % count)

        self._logger.info('Shuffling sentences')
        perm = torch.randperm(len(src))
        src = [src[idx] for idx in perm]
        trg = [trg[idx] for idx in perm]
        sizes = [sizes[idx] for idx in perm]

        self._logger.info('Sorting sentences by size')
        _, perm = torch.sort(torch.Tensor(sizes))
        src = [src[idx] for idx in perm]
        trg = [trg[idx] for idx in perm]

        self._logger.info('Prepared %d sentences (%d ignored due to length == 0)' % (len(src), ignored))

        return src, trg
Project: NeuralMT | Author: hlt-mt
def shuffle(self):
        data = list(zip(self.src, self.tgt))
        self.src, self.tgt = zip(*[data[i] for i in torch.randperm(len(data))])
Project: alpha-dimt-icmlws | Author: sotetsuk
def shuffle(self):
        data = list(zip(self.src, self.tgt))
        self.src, self.tgt = zip(*[data[i] for i in torch.randperm(len(data))])
Project: vqa.pytorch | Author: Cadene
def __iter__(self):
        return iter(torch.randperm(self.num_samples).long())
Project: pytorch | Author: tylergenter
def __iter__(self):
        return iter(torch.randperm(self.num_samples).long())
Project: pytorch | Author: tylergenter
def __iter__(self):
        return (self.indices[i] for i in torch.randperm(len(self.indices)))
Project: pytorch | Author: tylergenter
def gather_variable(shape, index_dim, max_indices):
    assert len(shape) == 2
    assert index_dim < 2
    batch_dim = 1 - index_dim
    index = torch.LongTensor(*shape)
    for i in range(shape[index_dim]):
        index.select(index_dim, i).copy_(
            torch.randperm(max_indices)[:shape[batch_dim]])
    return Variable(index, requires_grad=False)
Project: pytorch | Author: tylergenter
def __iter__(self):
        for i in range(10):
            yield torch.randn(2, 10), torch.randperm(10)[:2]
Project: pytorch | Author: tylergenter
def setUp(self):
        self.data = torch.randn(100, 2, 3, 5)
        self.labels = torch.randperm(50).repeat(2)
        self.dataset = TensorDataset(self.data, self.labels)