Python torch.nn.functional module: conv3d() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use torch.nn.functional.conv3d().

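Before the project examples, here is a minimal, self-contained call to F.conv3d using the current tensor API; the shapes are illustrative assumptions, not taken from any of the projects below.

import torch
import torch.nn.functional as F

# Assumed shapes for illustration: batch of 2, 3 input channels, 8 output channels,
# a 10x10x10 volume, and a 3x3x3 kernel.
x = torch.randn(2, 3, 10, 10, 10)   # (N, C_in, D, H, W)
w = torch.randn(8, 3, 3, 3, 3)      # (C_out, C_in, kD, kH, kW)
b = torch.zeros(8)

y = F.conv3d(x, w, bias=b, stride=1, padding=1)
print(y.shape)  # torch.Size([2, 8, 10, 10, 10]): padding=1 keeps the spatial size for a 3x3x3 kernel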
Project: diracnets | Author: szagoruyko | project source | file source
def forward(self, input):
        return F.conv3d(input, self.alpha * Variable(self.delta) + self.beta * normalize(self.weight),
                        self.bias, self.stride, self.padding, self.dilation)
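This forward method belongs to a Dirac-parameterized convolution module; the attributes it reads (self.alpha, self.beta, self.delta, self.weight, self.bias) and the normalize() helper are defined elsewhere in diracnets. A rough, self-contained sketch of such a module for the 3D case follows; the class name DiracConv3dSketch, the parameter initializations, and the per-filter L2 normalization are assumptions for illustration, not the diracnets implementation itself.

import torch
import torch.nn as nn
import torch.nn.functional as F

def normalize(w):
    # Assumed helper: L2-normalize each output filter of the weight tensor.
    norms = w.view(w.size(0), -1).norm(dim=1).clamp(min=1e-12)
    return w / norms.view(-1, 1, 1, 1, 1)

class DiracConv3dSketch(nn.Module):
    # Hypothetical sketch of a Dirac-parameterized 3D convolution:
    # output = conv3d(input, alpha * delta + beta * normalize(weight)),
    # where delta is a fixed Dirac (identity) kernel stored as a buffer.
    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1):
        super().__init__()
        self.weight = nn.Parameter(0.01 * torch.randn(out_channels, in_channels,
                                                      kernel_size, kernel_size, kernel_size))
        self.bias = nn.Parameter(torch.zeros(out_channels))
        self.alpha = nn.Parameter(torch.ones(1))
        self.beta = nn.Parameter(0.1 * torch.ones(1))
        delta = torch.zeros_like(self.weight.data)
        nn.init.dirac_(delta)                      # identity (Dirac) kernel
        self.register_buffer('delta', delta)
        self.stride, self.padding, self.dilation = 1, padding, 1

    def forward(self, input):
        return F.conv3d(input, self.alpha * self.delta + self.beta * normalize(self.weight),
                        self.bias, self.stride, self.padding, self.dilation)

Because delta is a Dirac (identity) kernel and beta starts small, such a layer behaves approximately like an identity mapping over the first in_channels output channels at initialization.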
Project: pytorch | Author: tylergenter | project source | file source (the same test appears verbatim in the pytorch:ezyang, pytorch:pytorch, and pytorch-coriander:hughperkins forks)
def test_dirac_identity(self):
        batch, in_c, out_c, size, kernel_size = 8, 3, 4, 5, 3
        # Test 1D
        input_var = Variable(torch.randn(batch, in_c, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv1d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data  # Variables do not support nonzero
        self.assertEqual(input_tensor[:, :, 1:-1], output_tensor[:, :in_c, :])  # Assert in_c outputs are preserved
        assert torch.nonzero(output_tensor[:, in_c:, :]).numel() == 0  # Assert extra outputs are 0

        # Test 2D
        input_var = Variable(torch.randn(batch, in_c, size, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv2d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data
        self.assertEqual(input_tensor[:, :, 1:-1, 1:-1], output_tensor[:, :in_c, :, :])
        assert torch.nonzero(output_tensor[:, in_c:, :, :]).numel() == 0

        # Test 3D
        input_var = Variable(torch.randn(batch, in_c, size, size, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size, kernel_size, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv3d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data
        self.assertEqual(input_tensor[:, :, 1:-1, 1:-1, 1:-1], output_tensor[:, :in_c, :, :, :])
        assert torch.nonzero(output_tensor[:, in_c:, :, :, :]).numel() == 0
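The same identity check can be written against the current API, where init.dirac has become init.dirac_ and Variable wrappers are no longer needed; this is a sketch that mirrors the 3D case of the test above.

import torch
import torch.nn.functional as F
from torch.nn import init

batch, in_c, out_c, size, kernel_size = 8, 3, 4, 5, 3

x = torch.randn(batch, in_c, size, size, size)
w = torch.zeros(out_c, in_c, kernel_size, kernel_size, kernel_size)
init.dirac_(w)  # in-place Dirac initialization (successor of init.dirac)

y = F.conv3d(x, w)
# The first in_c output channels reproduce the interior of the input volume...
assert torch.allclose(x[:, :, 1:-1, 1:-1, 1:-1], y[:, :in_c])
# ...and the remaining out_c - in_c channels are identically zero.
assert torch.nonzero(y[:, in_c:]).numel() == 0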
Project: pytorch | Author: ezyang | project source | file source (the same test appears verbatim in the pytorch:tylergenter, pytorch:pytorch, and pytorch-coriander:hughperkins forks)
def test_calculate_gain_linear(self):
        for fn in ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']:
            gain = init.calculate_gain(fn)
            self.assertEqual(gain, 1)
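For contrast with the gain of 1 returned for the linear and convolution names above, calculate_gain returns nonlinearity-specific values for other names; the checks below follow the documented gain formulas.

import math
from torch.nn import init

assert init.calculate_gain('conv3d') == 1               # linear family: gain 1
assert init.calculate_gain('relu') == math.sqrt(2.0)    # ReLU: sqrt(2)
assert init.calculate_gain('tanh') == 5.0 / 3           # tanh: 5/3
# leaky_relu takes its negative slope as a second argument:
print(init.calculate_gain('leaky_relu', 0.2))           # sqrt(2 / (1 + 0.2 ** 2))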