Python torch.nn module: ConvTranspose1d() code examples

We have extracted the following 8 code examples from open-source Python projects to illustrate how to use torch.nn.ConvTranspose1d().
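
As a quick orientation before the project snippets, here is a minimal, self-contained example (not taken from any of the projects below) showing the expected (batch, channels, length) input layout and how the output length follows from L_out = (L_in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1:

import torch
import torch.nn as nn

# ConvTranspose1d expects input of shape (batch, in_channels, length).
upsample = nn.ConvTranspose1d(in_channels=16, out_channels=8,
                              kernel_size=4, stride=2, padding=1)
x = torch.randn(2, 16, 100)
y = upsample(x)
# L_out = (100 - 1) * 2 - 2 * 1 + (4 - 1) + 0 + 1 = 200
print(y.shape)  # torch.Size([2, 8, 200])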

Project: samplernn-pytorch    Author: deepsound-project    | project source | file source
def __init__(self, in_channels, out_channels, kernel_size, bias=True):
        super().__init__()

        self.conv_t = nn.ConvTranspose1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=kernel_size,  # non-overlapping frames: upsamples the length by kernel_size
            bias=False           # the built-in bias is replaced by the learned one below
        )

        if bias:
            # one learned bias per output channel and per position inside a frame
            self.bias = nn.Parameter(
                torch.FloatTensor(out_channels, kernel_size)
            )
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()
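
The layer above disables the built-in bias (bias=False) and instead learns a separate bias for every output channel and every position within each upsampled frame. A plausible companion forward() consistent with that parameter shape (a hedged sketch, not the project's verbatim code) would tile the (out_channels, kernel_size) bias across the whole output:

def forward(self, input):
    batch_size, _, length = input.size()
    (kernel_size,) = self.conv_t.kernel_size
    # stride == kernel_size and padding == 0, so the output length is length * kernel_size
    output = self.conv_t(input)
    if self.bias is not None:
        bias = self.bias.unsqueeze(0).unsqueeze(2).expand(
            batch_size, self.conv_t.out_channels, length, kernel_size
        ).contiguous().view(batch_size, self.conv_t.out_channels,
                            length * kernel_size)
        output = output + bias
    return output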
Project: GlottGAN    Author: bajibabu    | project source | file source
def __init__(self, noise_input_size, cond_input_size):
        super(_netG, self).__init__()
        self.noise_input_size = noise_input_size
        self.cond_input_size = cond_input_size

        # first dense block
        # input shape [batch_size x 147]
        self.fc1 = nn.Sequential(
            nn.Linear(self.noise_input_size + self.cond_input_size, 100 * 10),
            nn.BatchNorm1d(100 * 10),
            nn.LeakyReLU(0.2, inplace=True)
        )
        # Convolutional block
        self.conv1 = nn.Sequential(
            # input shape [batch_size x 10 x 100]
            nn.ConvTranspose1d(10, 250, 13, stride=2, padding=6,
                              output_padding=1, bias=True),
            nn.BatchNorm1d(250),
            nn.LeakyReLU(0.2, inplace=True),

            # input shape [batch_size x 250 x 200]
            nn.ConvTranspose1d(250, 100, 13, stride=2, padding=6,
                              output_padding=1, bias=True),
            nn.BatchNorm1d(100),
            nn.LeakyReLU(0.2, inplace=True),

            # input shape [batch_size x 100 x 400]
            nn.ConvTranspose1d(100, 1, 13, stride=1, padding=6,
                              bias=True),
            nn.BatchNorm1d(1),
            # input shape [batch_size x 1 x 400]
            nn.Tanh()
        )
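
With these hyperparameters each stride-2 layer exactly doubles the length, since (L - 1) * 2 - 2 * 6 + 13 + 1 = 2L, so the signal grows 100 -> 200 -> 400, and the final stride-1 layer preserves 400. A hedged sketch of the forward pass this architecture implies (the project's actual method may differ):

def forward(self, noise, cond):
    x = torch.cat((noise, cond), dim=1)   # (batch, noise_input_size + cond_input_size)
    x = self.fc1(x)                       # (batch, 1000)
    x = x.view(x.size(0), 10, 100)        # (batch, 10, 100)
    return self.conv1(x)                  # (batch, 1, 400)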
Project: covfefe    Author: deepnn    | project source | file source
def conv_transpose(in_ch, out_ch, kernel_size,
                      stride=1, padding=0, out_padding=0, 
                      dilation=1, groups=1, bias=True, dim=2):

    #TODO: in the future some preprocessing goes here
    in_dim = dim
    if in_dim == 1:
        return nn.ConvTranspose1d(in_ch, out_ch, kernel_size,
                        stride=stride,
                        padding=padding,
                        output_padding=out_padding,
                        dilation=dilation,
                        groups=groups,
                        bias=bias)

    elif in_dim == 2:
        return nn.ConvTranspose2d(in_ch, out_ch, kernel_size,
                        stride=stride,
                        padding=padding,
                        output_padding=out_padding,
                        dilation=dilation,
                        groups=groups,
                        bias=bias)

    elif in_dim == 3:
        return nn.ConvTranspose3d(in_ch, out_ch, kernel_size,
                        stride=stride,
                        padding=padding,
                        output_padding=out_padding,
                        dilation=dilation,
                        groups=groups,
                        bias=bias)

    else:
        # guard against silently returning None for an unsupported dim
        raise ValueError('dim must be 1, 2, or 3')

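A quick, illustrative check of the dispatcher: with dim=1 it should hand back an nn.ConvTranspose1d carrying the forwarded arguments (the variable names here are hypothetical):

deconv = conv_transpose(in_ch=64, out_ch=32, kernel_size=4,
                        stride=2, padding=1, dim=1)
assert isinstance(deconv, nn.ConvTranspose1d)
out = deconv(torch.randn(8, 64, 50))
print(out.shape)  # torch.Size([8, 32, 100]): (50 - 1) * 2 - 2 * 1 + 4 = 100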
Project: pytorch    Author: ezyang    | project source | file source
def test_conv_modules_raise_error_on_incorrect_input_size(self):
        modules = [nn.Conv1d(3, 8, 3), nn.ConvTranspose1d(3, 8, 3),
                   nn.Conv2d(3, 8, 3), nn.ConvTranspose2d(3, 8, 3),
                   nn.Conv3d(3, 8, 3), nn.ConvTranspose3d(3, 8, 3)]

        invalid_input_dims = [(2, 4), (2, 4),
                              (3, 5), (3, 5),
                              (4, 6), (4, 6)]

        for invalid_dims, module in zip(invalid_input_dims, modules):
            for dims in invalid_dims:
                input = Variable(torch.Tensor(torch.Size((3, ) * dims)))
                self.assertRaises(ValueError, lambda: module(input))
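
The test relies on each N-d (transposed) convolution accepting only (N + 2)-dimensional input, i.e. (batch, channels, *spatial); Variable is the pre-0.4 autograd wrapper and is no longer needed in modern PyTorch. For reference, a valid call looks like this (a minimal sketch):

module = nn.ConvTranspose1d(3, 8, 3)
out = module(torch.randn(4, 3, 20))  # 3-D input: (batch, channels, length)
print(out.shape)                     # torch.Size([4, 8, 22]): (20 - 1) * 1 + 3 = 22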
Project: pointGAN    Author: fxia22    | project source | file source
def __init__(self, num_points=2500):
        super(PointGenC, self).__init__()
        # positional args: (in_channels, out_channels, kernel_size, stride, padding)
        self.conv1 = nn.ConvTranspose1d(100, 1024, 2, 2, 0)
        self.conv2 = nn.ConvTranspose1d(1024, 512, 5, 5, 0)
        self.conv3 = nn.ConvTranspose1d(512, 256, 5, 5, 0)
        self.conv4 = nn.ConvTranspose1d(256, 128, 2, 2, 0)
        self.conv5 = nn.ConvTranspose1d(128, 64, 5, 5, 0)
        self.conv6 = nn.ConvTranspose1d(64, 3, 5, 5, 0)

        self.bn1 = torch.nn.BatchNorm1d(1024)
        self.bn2 = torch.nn.BatchNorm1d(512)
        self.bn3 = torch.nn.BatchNorm1d(256)
        self.bn4 = torch.nn.BatchNorm1d(128)
        self.bn5 = torch.nn.BatchNorm1d(64)
        self.th = nn.Tanh()
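
The strides multiply out to 2 * 5 * 5 * 2 * 5 * 5 = 2500, so a latent vector treated as a length-1 sequence grows to exactly num_points. A hedged sketch of the forward pass these layers imply (assuming torch.nn.functional is imported as F; the project's actual method may differ):

def forward(self, x):                    # x: (batch, 100, 1)
    x = F.relu(self.bn1(self.conv1(x)))  # (batch, 1024, 2)
    x = F.relu(self.bn2(self.conv2(x)))  # (batch, 512, 10)
    x = F.relu(self.bn3(self.conv3(x)))  # (batch, 256, 50)
    x = F.relu(self.bn4(self.conv4(x)))  # (batch, 128, 100)
    x = F.relu(self.bn5(self.conv5(x)))  # (batch, 64, 500)
    return self.th(self.conv6(x))        # (batch, 3, 2500)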
Project: pytorch-planet-amazon    Author: rwightman    | project source | file source
def is_sparseable(m):
    return hasattr(m, 'weight') and isinstance(m, (
        nn.Conv1d, nn.Conv2d, nn.Conv3d,
        nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d,
        nn.Linear))
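
A predicate like this is typically used to collect the prunable weight tensors of a model, e.g. (model here is any hypothetical nn.Module):

sparseable_weights = [m.weight for m in model.modules() if is_sparseable(m)]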
Project: pytorch    Author: pytorch    | project source | file source
def test_conv_modules_raise_error_on_incorrect_input_size(self):
        modules = [nn.Conv1d(3, 8, 3), nn.ConvTranspose1d(3, 8, 3),
                   nn.Conv2d(3, 8, 3), nn.ConvTranspose2d(3, 8, 3),
                   nn.Conv3d(3, 8, 3), nn.ConvTranspose3d(3, 8, 3)]

        invalid_input_dims = [(2, 4), (2, 4),
                              (3, 5), (3, 5),
                              (4, 6), (4, 6)]

        for invalid_dims, module in zip(invalid_input_dims, modules):
            for dims in invalid_dims:
                input = Variable(torch.Tensor(torch.Size((3, ) * dims)))
                self.assertRaises(ValueError, lambda: module(input))
Project: MachineLearning    Author: timomernick    | project source | file source
def __init__(self):
        super(Generator, self).__init__()

        self.deconv0 = nn.ConvTranspose1d(nz, ngf * 8, 4, 1, 0, bias=False)
        self.deconv1 = nn.ConvTranspose1d(ngf * 8, ngf * 4, 4, 2, 1, bias=False)
        self.deconv2 = nn.ConvTranspose1d(ngf * 4, ngf * 2, 4, 2, 1, bias=False)
        self.deconv3 = nn.ConvTranspose1d(ngf * 2, ngf * 1, 4, 2, 1, bias=False)
        # Integer division (//) keeps the channel counts ints under Python 3;
        # the original `/` would pass floats to ConvTranspose1d and fail.
        self.deconv4 = nn.ConvTranspose1d(ngf * 1, ngf // 2, 4, 2, 1, bias=False)
        self.deconv5 = nn.ConvTranspose1d(ngf // 2, ngf // 4, 4, 2, 1, bias=False)
        self.deconv6 = nn.ConvTranspose1d(ngf // 4, ngf // 8, 4, 2, 1, bias=False)
        self.deconv7 = nn.ConvTranspose1d(ngf // 8, ngf // 16, 4, 2, 1, bias=False)
        self.deconv8 = nn.ConvTranspose1d(ngf // 16, ngf // 32, 4, 2, 1, bias=False)
        self.deconv9 = nn.ConvTranspose1d(ngf // 32, nc, 4, 2, 1, bias=False)

        self.bn0 = nn.BatchNorm1d(ngf * 8)
        self.bn1 = nn.BatchNorm1d(ngf * 4)
        self.bn2 = nn.BatchNorm1d(ngf * 2)
        self.bn3 = nn.BatchNorm1d(ngf * 1)
        self.bn4 = nn.BatchNorm1d(ngf // 2)
        self.bn5 = nn.BatchNorm1d(ngf // 4)
        self.bn6 = nn.BatchNorm1d(ngf // 8)
        self.bn7 = nn.BatchNorm1d(ngf // 16)
        self.bn8 = nn.BatchNorm1d(ngf // 32)

        self.relu = nn.ReLU(True)

        self.tanh = nn.Tanh()

        self.apply(weights_init)

        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, betas=(beta_1, beta_2))
        #self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate, alpha=beta_2)
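
This snippet depends on module-level globals (nz, ngf, nc, learning_rate, beta_1, beta_2) and a weights_init function defined elsewhere in the project. Values consistent with the layer arithmetic (purely illustrative, not the project's actual settings) could be:

nz = 100      # latent size; deconv0 maps (batch, nz, 1) -> (batch, ngf * 8, 4)
ngf = 64      # base width; must be >= 32 so that ngf // 32 >= 1
nc = 1        # output channels, e.g. a mono waveform
learning_rate = 0.0002
beta_1, beta_2 = 0.5, 0.999

Each (4, 2, 1) layer then doubles the length, so a length-1 latent grows to 4 * 2**9 = 2048 samples after deconv9.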