Python torch.nn.functional module: conv1d() usage examples

Collected below are code examples extracted from open-source Python projects, illustrating how to use torch.nn.functional.conv1d(). Tests duplicated verbatim across several PyTorch forks are shown once, with the other copies noted in the header.
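Before the project excerpts, a minimal sketch of the conv1d() call and its expected tensor shapes (the tensors here are made up for illustration; note the excerpts below use the older Variable wrapper from pre-0.4 PyTorch):

import torch
import torch.nn.functional as F

# input:  (batch, in_channels, width)
# weight: (out_channels, in_channels // groups, kernel_size)
x = torch.randn(2, 3, 10)
w = torch.randn(4, 3, 5)
b = torch.randn(4)

y = F.conv1d(x, w, bias=b, stride=1, padding=2)
print(y.shape)  # torch.Size([2, 4, 10]); padding=2 keeps width = 10 for kernel_size=5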

Project: diracnets    Author: szagoruyko
def forward(self, input):
        # Dirac parameterization: alpha scales an identity (Dirac delta) kernel, beta scales
        # the normalized learned kernel, so the layer behaves like a residual connection.
        return F.conv1d(input, self.alpha * Variable(self.delta) + self.beta * normalize(self.weight),
                        self.bias, self.stride, self.padding, self.dilation)
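The excerpt references self.delta without showing how it is built; a plausible construction, assuming the DiracNets scheme, is a Dirac-delta kernel buffer:

import torch
import torch.nn.init as init

out_channels, in_channels, kernel_size = 4, 3, 3
# conv1d with this weight copies the first min(out, in) input channels through unchanged
delta = torch.zeros(out_channels, in_channels, kernel_size)
init.dirac_(delta)  # named init.dirac in the older PyTorch these excerpts target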
Project: pytorch    Author: tylergenter (the same test appears verbatim in the hughperkins, ezyang, and pytorch/pytorch copies)
def test_dirac_identity(self):
        batch, in_c, out_c, size, kernel_size = 8, 3, 4, 5, 3
        # Test 1D
        input_var = Variable(torch.randn(batch, in_c, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv1d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data  # Variables do not support nonzero
        self.assertEqual(input_tensor[:, :, 1:-1], output_tensor[:, :in_c, :])  # Assert in_c outputs are preserved
        assert torch.nonzero(output_tensor[:, in_c:, :]).numel() == 0  # Assert extra outputs are 0

        # Test 2D
        input_var = Variable(torch.randn(batch, in_c, size, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv2d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data
        self.assertEqual(input_tensor[:, :, 1:-1, 1:-1], output_tensor[:, :in_c, :, :])
        assert torch.nonzero(output_tensor[:, in_c:, :, :]).numel() == 0

        # Test 3D
        input_var = Variable(torch.randn(batch, in_c, size, size, size))
        filter_var = Variable(torch.zeros(out_c, in_c, kernel_size, kernel_size, kernel_size))
        init.dirac(filter_var)
        output_var = F.conv3d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data
        self.assertEqual(input_tensor[:, :, 1:-1, 1:-1, 1:-1], output_tensor[:, :in_c, :, :, :])
        assert torch.nonzero(output_tensor[:, in_c:, :, :, :]).numel() == 0
Project: pytorch    Author: ezyang (identical copies exist under tylergenter, hughperkins, and pytorch/pytorch)
def test_calculate_gain_linear(self):
        for fn in ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']:
            gain = init.calculate_gain(fn)
            self.assertEqual(gain, 1)
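For contrast with the all-ones linear case asserted above, calculate_gain returns larger gains for common nonlinearities (values per the PyTorch docs):

import torch.nn.init as init

print(init.calculate_gain('conv1d'))           # 1
print(init.calculate_gain('relu'))             # sqrt(2) ~ 1.414
print(init.calculate_gain('leaky_relu', 0.2))  # sqrt(2 / (1 + 0.2 ** 2))
print(init.calculate_gain('tanh'))             # 5.0 / 3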
Project: PyTorch-Encoding    Author: zhanghang1989
def forward(self, input):
        return F.conv1d(input, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
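This is the standard functional recipe for a Conv1d-style layer; a minimal class such a forward could live in (a sketch with assumed attribute names matching the call, not the project's actual class):

import torch
import torch.nn as nn
import torch.nn.functional as F

class FunctionalConv1d(nn.Module):
    # minimal, illustrative host module for the forward above
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, groups=1):
        super(FunctionalConv1d, self).__init__()
        self.stride, self.padding = stride, padding
        self.dilation, self.groups = dilation, groups
        self.weight = nn.Parameter(
            torch.empty(out_channels, in_channels // groups, kernel_size))
        self.bias = nn.Parameter(torch.zeros(out_channels))
        nn.init.kaiming_uniform_(self.weight)

    def forward(self, input):
        return F.conv1d(input, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)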
Project: pytorch-dnc    Author: jingweiz
def _shift(self, wg_vb, shift_vb):
        """
        variables needed:
            wg_vb:    [batch_size x num_heads x mem_hei]
            shift_vb: [batch_size x num_heads x num_allowed_shifts]
                   -> sum=1; the shift weight vector
        returns:
            ws_vb:    [batch_size x num_heads x mem_hei]
                   -> the attention weight by location focus
        """
        batch_size = wg_vb.size(0)
        input_dim = wg_vb.size(2)
        assert input_dim == self.mem_hei
        filter_dim = shift_vb.size(2)
        assert filter_dim == self.num_allowed_shifts

        ws_vb = None
        for i in range(batch_size):          # the shift kernel differs per batch item ...
            for j in range(self.num_heads):  # ... and per head, so convolve each pair separately
                # circular convolution via conv1d: tile the weighting 3x, convolve, keep the middle window
                ws_tmp_vb = F.conv1d(wg_vb[i][j].unsqueeze(0).unsqueeze(0).repeat(1, 1, 3),
                                     shift_vb[i][j].unsqueeze(0).unsqueeze(0).contiguous(),
                                     padding=filter_dim // 2)[:, :, input_dim:(2 * input_dim)]
                if ws_vb is None:
                    ws_vb = ws_tmp_vb
                else:
                    ws_vb = torch.cat((ws_vb, ws_tmp_vb), 0)
        ws_vb = ws_vb.view(-1, self.num_heads, self.mem_hei)
        return ws_vb
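The double loop convolves one (batch, head) pair at a time; the same circular shift can be expressed as a single grouped convolution. A sketch of the idea (illustrative, not from the project):

import torch
import torch.nn.functional as F

def circular_shift(wg, shift):
    # wg:    (batch, heads, mem) -- attention weights
    # shift: (batch, heads, k)   -- per-head shift kernels, rows summing to 1
    batch, heads, mem = wg.size()
    k = shift.size(2)
    # give every (batch, head) row its own channel and its own kernel via groups,
    # replacing the Python double loop with one conv1d call
    x = wg.reshape(1, batch * heads, mem).repeat(1, 1, 3)  # tile 3x for circularity
    w = shift.reshape(batch * heads, 1, k)
    out = F.conv1d(x, w, padding=k // 2, groups=batch * heads)
    return out[:, :, mem:2 * mem].reshape(batch, heads, mem)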
Project: contiguous-succotash    Author: kefirski
def forward(self, decoder_input, z, drop_prob):
        """
        :param decoder_input: tensor with shape of [batch_size, seq_len, embed_size]
        :param z: sequence latent variable with shape of [batch_size, latent_variable_size]
        :param drop_prob: probability of an element of decoder input to be zeroed in sense of dropout

        :return: unnormalized logits of the sentence word distribution,
                 with shape of [batch_size, seq_len, word_vocab_size]
        """

        assert parameters_allocation_check(self), \
            'Invalid CUDA options. Parameters should be allocated in the same memory'

        [batch_size, seq_len, _] = decoder_input.size()

        # the decoder is conditioned on the context via an additional bias = W_cond * z at every input token

        z = t.cat([z] * seq_len, 1).view(batch_size, seq_len, self.params.latent_variable_size)
        decoder_input = t.cat([decoder_input, z], 2)
        decoder_input = F.dropout(decoder_input, drop_prob)

        # x is tensor with shape [batch_size, input_size=in_channels, seq_len=input_width]
        x = decoder_input.transpose(1, 2).contiguous()

        for layer, kernel in enumerate(self.kernels):
            # apply the conv layer with a non-linearity; trailing elements of the
            # sequence are dropped below to perform input shifting (causal convolution)
            x = F.conv1d(x, kernel,
                         bias=self.biases[layer],
                         dilation=self.params.decoder_dilations[layer],
                         padding=self.params.decoder_paddings[layer])

            x_width = x.size()[2]
            x = x[:, :, :(x_width - self.params.decoder_paddings[layer])].contiguous()

            x = F.relu(x)

        x = x.transpose(1, 2).contiguous()
        x = x.view(-1, self.out_size)
        x = self.fc(x)
        result = x.view(-1, seq_len, self.params.word_vocab_size)

        return result
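The pad-then-trim pattern in the loop above is a causal (masked) convolution: assuming each layer's padding is chosen as (kernel_size - 1) * dilation, cutting the same number of trailing positions means position t never sees inputs after t. A standalone sketch of the trick (names are illustrative):

import torch
import torch.nn.functional as F

def causal_conv1d(x, weight, bias=None, dilation=1):
    # x: (batch, channels, seq_len); output position t depends only on x[..., :t + 1]
    kernel_size = weight.size(2)
    pad = (kernel_size - 1) * dilation
    y = F.conv1d(x, weight, bias, padding=pad, dilation=dilation)
    return y[:, :, :x.size(2)]  # drop the positions that peek past the end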