Python torch.nn.functional module: affine_grid() example source code

The following 8 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.functional.affine_grid().
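Before the project snippets, here is a minimal, self-contained sketch of the basic API (my own illustration, not taken from any project below): affine_grid() turns a batch of 2x3 affine matrices theta into a sampling grid of shape (N, H, W, 2) with coordinates in [-1, 1], and grid_sample() uses that grid to resample an input tensor. With an identity theta the input comes back unchanged. Written against PyTorch >= 0.4; versions >= 1.3 additionally accept an align_corners argument on both calls.

import torch
import torch.nn.functional as F

# Identity affine transform for a batch of one single-channel 4x4 image.
theta = torch.tensor([[[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]]])    # shape (N, 2, 3)
img = torch.arange(16.0).view(1, 1, 4, 4)    # shape (N, C, H, W)

grid = F.affine_grid(theta, img.size())      # shape (N, H, W, 2), values in [-1, 1]
out = F.grid_sample(img, grid)               # bilinearly samples img at the grid locations

print(torch.allclose(out, img))              # True: the identity transform reproduces the input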

Project: faster-rcnn.pytorch | Author: jwyang
def _affine_grid_gen(rois, input_size, grid_size):

    rois = rois.detach()
    # rois columns are (batch_idx, x1, y1, x2, y2) in image coordinates;
    # dividing by 16.0 maps them onto the stride-16 feature map.
    x1 = rois[:, 1::4] / 16.0
    y1 = rois[:, 2::4] / 16.0
    x2 = rois[:, 3::4] / 16.0
    y2 = rois[:, 4::4] / 16.0

    height = input_size[0]
    width = input_size[1]

    zero = Variable(rois.data.new(rois.size(0), 1).zero_())
    theta = torch.cat([\
      (x2 - x1) / (width - 1),
      zero,
      (x1 + x2 - width + 1) / (width - 1),
      zero,
      (y2 - y1) / (height - 1),
      (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)

    grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, grid_size, grid_size)))

    return grid
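The six tensors concatenated into theta map the output grid's normalized [-1, 1] coordinates onto the ROI box; the corresponding 2x3 matrix is written out in the docstring of _crop_pool_layer below. A quick sanity check with illustrative numbers (not from the project): an ROI spanning the whole W x H feature map reduces to the identity transform.

W = H = 7.0                                # hypothetical feature-map size
x1, y1, x2, y2 = 0.0, 0.0, W - 1, H - 1    # ROI covering the entire map
print((x2 - x1) / (W - 1))                 # x scale        -> 1.0
print((x1 + x2 - W + 1) / (W - 1))         # x translation  -> 0.0
print((y2 - y1) / (H - 1))                 # y scale        -> 1.0
print((y1 + y2 - H + 1) / (H - 1))         # y translation  -> 0.0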
Project: pytorch-faster-rcnn | Author: ruotianluo
def _crop_pool_layer(self, bottom, rois, max_pool=True):
    # implement it using stn
    # box to affine
    # input (x1,y1,x2,y2)
    """
    [  x2-x1             x1 + x2 - W + 1  ]
    [  -----      0      ---------------  ]
    [  W - 1                  W - 1       ]
    [                                     ]
    [           y2-y1    y1 + y2 - H + 1  ]
    [    0      -----    ---------------  ]
    [           H - 1         H - 1      ]
    """
    rois = rois.detach()

    x1 = rois[:, 1::4] / 16.0
    y1 = rois[:, 2::4] / 16.0
    x2 = rois[:, 3::4] / 16.0
    y2 = rois[:, 4::4] / 16.0

    height = bottom.size(2)
    width = bottom.size(3)

    # affine theta
    theta = Variable(rois.data.new(rois.size(0), 2, 3).zero_())
    theta[:, 0, 0] = (x2 - x1) / (width - 1)
    theta[:, 0, 2] = (x1 + x2 - width + 1) / (width - 1)
    theta[:, 1, 1] = (y2 - y1) / (height - 1)
    theta[:, 1, 2] = (y1 + y2 - height + 1) / (height - 1)

    if max_pool:
      pre_pool_size = cfg.POOLING_SIZE * 2
      grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, pre_pool_size, pre_pool_size)))
      crops = F.grid_sample(bottom.expand(rois.size(0), bottom.size(1), bottom.size(2), bottom.size(3)), grid)
      crops = F.max_pool2d(crops, 2, 2)
    else:
      grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, cfg.POOLING_SIZE, cfg.POOLING_SIZE)))
      crops = F.grid_sample(bottom.expand(rois.size(0), bottom.size(1), bottom.size(2), bottom.size(3)), grid)

    return crops
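Note the two branches: with max_pool=True the ROI is first sampled at twice cfg.POOLING_SIZE and then reduced with a 2x2 max pool, approximating RoI max pooling; the else branch samples directly at the target resolution, which amounts to a plain bilinear crop-and-resize.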
Project: pyro | Author: uber
def window_to_image(z_where, window_size, image_size, windows):
    n = windows.size(0)
    assert windows.size(1) == window_size ** 2, 'Size mismatch.'
    theta = expand_z_where(z_where)
    grid = F.affine_grid(theta, torch.Size((n, 1, image_size, image_size)))
    out = F.grid_sample(windows.view(n, 1, window_size, window_size), grid)
    return out.view(n, image_size, image_size)
Project: pyro | Author: uber
def image_to_window(z_where, window_size, image_size, images):
    n = images.size(0)
    assert images.size(1) == images.size(2) == image_size, 'Size mismatch.'
    theta_inv = expand_z_where(z_where_inv(z_where))
    grid = F.affine_grid(theta_inv, torch.Size((n, 1, window_size, window_size)))
    out = F.grid_sample(images.view(n, 1, image_size, image_size), grid)
    return out.view(n, -1)


# Helper to expand parameters to the size of the mini-batch. I would
# like to remove this and just write `t.expand(n, -1)` inline, but the
# `-1` argument of `expand` doesn't seem to work with PyTorch 0.2.0.
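Both pyro snippets rely on expand_z_where and z_where_inv, which are defined elsewhere in that project and not shown here. Purely as a hedged sketch: assuming each row of z_where packs a scale and a translation (s, x, y), expand_z_where would build the 2x3 matrices expected by affine_grid roughly like this (the project's actual helper may differ):

def expand_z_where(z_where):
    # Assumed layout (not confirmed by the snippets above): z_where[i] = [s, x, y].
    # Produces theta[i] = [[s, 0, x],
    #                      [0, s, y]]
    n = z_where.size(0)
    s, x, y = z_where[:, 0], z_where[:, 1], z_where[:, 2]
    zeros = s * 0  # zeros with the same dtype/device as z_where
    return torch.stack([s, zeros, x, zeros, s, y], 1).view(n, 2, 3)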
Project: pytorch | Author: ezyang
def test_affine_grid(self):
        # test known input on CPU
        input = Variable(torch.arange(1, 7).view(1, 2, 3))
        output = F.affine_grid(input, torch.Size([1, 1, 2, 2]))
        groundtruth = torch.Tensor(
            [[[0, -3], [2, 5]], [[4, 7], [6, 15]]]).view(1, 2, 2, 2)
        self.assertEqual(output.data, groundtruth)

        # do gradcheck
        N = random.randint(1, 8)
        C = random.randint(1, 8)
        H = random.randint(1, 8)
        W = random.randint(1, 8)
        sz = torch.Size([N, C, H, W])
        inp = Variable(torch.randn(N, 2, 3), requires_grad=True)
        self.assertTrue(gradcheck(lambda inp: F.affine_grid(inp, sz), (inp,)))

        # test CPU against CUDA
        if TEST_CUDNN:
            input_cpu = Variable(torch.randn(N, 2, 3), requires_grad=True)
            out_cpu = F.affine_grid(input_cpu, sz)
            gradients = torch.randn(out_cpu.size())
            out_cpu.backward(gradients)
            input_gpu = Variable(input_cpu.data.cuda(), requires_grad=True)
            out_cuda = F.affine_grid(input_gpu, sz)
            out_cuda.backward(gradients.cuda())
            self.assertEqual(out_cpu, out_cuda)
            self.assertEqual(input_cpu.grad, input_gpu.grad)
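The hard-coded groundtruth can be reproduced by hand: for a 2x2 output the normalized sampling locations are the four corners (x, y) in {-1, 1} x {-1, 1} (the pre-1.3 behaviour, equivalent to align_corners=True), and each grid entry is theta @ [x, y, 1] with theta = [[1, 2, 3], [4, 5, 6]]:

theta = [[1, 2, 3], [4, 5, 6]]    # torch.arange(1, 7).view(2, 3)
for y in (-1, 1):                 # rows of the 2x2 output grid
    for x in (-1, 1):             # columns
        gx = theta[0][0] * x + theta[0][1] * y + theta[0][2]
        gy = theta[1][0] * x + theta[1][1] * y + theta[1][2]
        print((gx, gy))
# prints (0, -3), (2, 5), (4, 7), (6, 15) -- exactly the groundtruth values above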
Project: pytorch | Author: pytorch
def test_affine_grid(self):
        # test known input on CPU
        input = Variable(torch.arange(1, 7).view(1, 2, 3))
        output = F.affine_grid(input, torch.Size([1, 1, 2, 2]))
        groundtruth = torch.Tensor(
            [[[0, -3], [2, 5]], [[4, 7], [6, 15]]]).view(1, 2, 2, 2)
        self.assertEqual(output.data, groundtruth)

        # do gradcheck
        N = random.randint(1, 8)
        C = random.randint(1, 8)
        H = random.randint(1, 8)
        W = random.randint(1, 8)
        sz = torch.Size([N, C, H, W])
        inp = Variable(torch.randn(N, 2, 3), requires_grad=True)
        self.assertTrue(gradcheck(lambda inp: F.affine_grid(inp, sz), (inp,)))

        # test CPU against CUDA
        if TEST_CUDNN:
            input_cpu = Variable(torch.randn(N, 2, 3), requires_grad=True)
            out_cpu = F.affine_grid(input_cpu, sz)
            gradients = torch.randn(out_cpu.size())
            out_cpu.backward(gradients)
            input_gpu = Variable(input_cpu.data.cuda(), requires_grad=True)
            out_cuda = F.affine_grid(input_gpu, sz)
            out_cuda.backward(gradients.cuda())
            self.assertEqual(out_cpu, out_cuda)
            self.assertEqual(input_cpu.grad, input_gpu.grad)
Project: tutorials | Author: pytorch
def stn(self, x):
        xs = self.localization(x)
        xs = xs.view(-1, 10 * 3 * 3)
        theta = self.fc_loc(xs)
        theta = theta.view(-1, 2, 3)

        grid = F.affine_grid(theta, x.size())
        x = F.grid_sample(x, grid)

        return x
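self.localization and self.fc_loc are defined elsewhere in the tutorial's module. A minimal sketch of a compatible container (the class name STNHead and the layer sizes are mine, chosen only to match the 10 * 3 * 3 flattening and the 2x3 theta above for a 28x28 single-channel input; not necessarily the tutorial's exact architecture):

import torch
import torch.nn as nn

class STNHead(nn.Module):
    def __init__(self):
        super(STNHead, self).__init__()
        # Localization network: any conv stack producing (N, 10, 3, 3) features.
        self.localization = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=7), nn.MaxPool2d(2, stride=2), nn.ReLU(True),
            nn.Conv2d(8, 10, kernel_size=5), nn.MaxPool2d(2, stride=2), nn.ReLU(True),
        )
        # Regressor for the 2x3 affine matrix, initialised to the identity
        # transform so training starts from "no warp".
        self.fc_loc = nn.Sequential(
            nn.Linear(10 * 3 * 3, 32), nn.ReLU(True), nn.Linear(32, 2 * 3),
        )
        self.fc_loc[2].weight.data.zero_()
        self.fc_loc[2].bias.data.copy_(torch.Tensor([1, 0, 0, 0, 1, 0]))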
Project: faster-rcnn.pytorch | Author: jwyang
def _crop_pool_layer(bottom, rois, max_pool=True):
    # code modified from 
    # https://github.com/ruotianluo/pytorch-faster-rcnn
    # implement it using stn
    # box to affine
    # input (x1,y1,x2,y2)
    """
    [  x2-x1             x1 + x2 - W + 1  ]
    [  -----      0      ---------------  ]
    [  W - 1                  W - 1       ]
    [                                     ]
    [           y2-y1    y1 + y2 - H + 1  ]
    [    0      -----    ---------------  ]
    [           H - 1         H - 1      ]
    """
    rois = rois.detach()
    batch_size = bottom.size(0)
    D = bottom.size(1)
    H = bottom.size(2)
    W = bottom.size(3)
    roi_per_batch = rois.size(0) // batch_size  # must be an integer for expand() below
    x1 = rois[:, 1::4] / 16.0
    y1 = rois[:, 2::4] / 16.0
    x2 = rois[:, 3::4] / 16.0
    y2 = rois[:, 4::4] / 16.0

    height = bottom.size(2)
    width = bottom.size(3)

    # affine theta
    zero = Variable(rois.data.new(rois.size(0), 1).zero_())
    theta = torch.cat([\
      (x2 - x1) / (width - 1),
      zero,
      (x1 + x2 - width + 1) / (width - 1),
      zero,
      (y2 - y1) / (height - 1),
      (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)

    if max_pool:
      pre_pool_size = cfg.POOLING_SIZE * 2
      grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, pre_pool_size, pre_pool_size)))
      bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W)\
                                                                .contiguous().view(-1, D, H, W)
      crops = F.grid_sample(bottom, grid)
      crops = F.max_pool2d(crops, 2, 2)
    else:
      grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, cfg.POOLING_SIZE, cfg.POOLING_SIZE)))
      bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W)\
                                                                .contiguous().view(-1, D, H, W)
      crops = F.grid_sample(bottom, grid)

    return crops, grid
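Unlike the earlier _crop_pool_layer, this version handles a feature map with batch size greater than one: bottom is replicated roi_per_batch times and flattened back to (roi_per_batch * batch_size, D, H, W), so ROI number k is paired with image k % batch_size and the ROIs must be ordered accordingly. roi_per_batch has to be an integer for expand(), hence the floor division above.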