Python torch.nn module: Tanh() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.Tanh().
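
nn.Tanh() is a stateless module that squashes each element of its input into the open interval (-1, 1). As a quick orientation before the project snippets, here is a minimal self-contained sketch (tensor shapes are arbitrary):

import torch
import torch.nn as nn

tanh = nn.Tanh()           # module form, composable inside nn.Sequential
x = torch.randn(2, 3)
y = tanh(x)                # elementwise tanh; equivalent to torch.tanh(x)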

Project: SeqMatchSeq    Author: pcgreat    | Project source | File source
def __init__(self, window_sizes, cov_dim, mem_dim):
        super(NewConvModule, self).__init__()
        self.window_sizes = window_sizes
        self.cov_dim = cov_dim
        self.mem_dim = mem_dim

        self.linear1 = nn.Linear(len(window_sizes) * mem_dim, mem_dim)
        self.relu1 = nn.ReLU()
        self.tanh1 = nn.Tanh()
Project: CycleGAN-Tensorflow-PyTorch-Simple    Author: LynnHo    | Project source | File source
def __init__(self, dim=64):
        super(Generator, self).__init__()

        conv_bn_relu = conv_norm_act
        dconv_bn_relu = dconv_norm_act

        self.ls = nn.Sequential(nn.ReflectionPad2d(3),
                                conv_bn_relu(3, dim * 1, 7, 1),
                                conv_bn_relu(dim * 1, dim * 2, 3, 2, 1),
                                conv_bn_relu(dim * 2, dim * 4, 3, 2, 1),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                ResiduleBlock(dim * 4, dim * 4),
                                dconv_bn_relu(dim * 4, dim * 2, 3, 2, 1, 1),
                                dconv_bn_relu(dim * 2, dim * 1, 3, 2, 1, 1),
                                nn.ReflectionPad2d(3),
                                nn.Conv2d(dim, 3, 7, 1),
                                nn.Tanh())
Project: SimGAN_pytorch    Author: AlexHex7    | Project source | File source
def __init__(self, block_num, in_features, nb_features=64):
        super(Refiner, self).__init__()

        self.conv_1 = nn.Sequential(
            nn.Conv2d(in_features, nb_features, 3, stride=1, padding=1),
            nn.BatchNorm2d(nb_features)
        )

        blocks = []
        for i in range(block_num):
            blocks.append(ResnetBlock(nb_features, nb_features))

        self.resnet_blocks = nn.Sequential(*blocks)

        self.conv_2 = nn.Sequential(
            nn.Conv2d(nb_features, in_features, 1, 1, 0),
            nn.Tanh()
        )
Project: SeqMatchSeq    Author: pcgreat    | Project source | File source
def new_proj_module(self):
        emb_dim = self.emb_dim
        mem_dim = self.mem_dim

        class NewProjModule(nn.Module):
            def __init__(self, emb_dim, mem_dim):
                super(NewProjModule, self).__init__()
                self.emb_dim = emb_dim
                self.mem_dim = mem_dim
                self.linear1 = nn.Linear(self.emb_dim, self.mem_dim)
                self.linear2 = nn.Linear(self.emb_dim, self.mem_dim)

            def forward(self, input):
                i = nn.Sigmoid()(self.linear1(input))
                u = nn.Tanh()(self.linear2(input))
                out = i.mul(u)  # elementwise gate; the Lua Torch CMulTable().updateOutput([i, u]) equivalent
                return out

        module = NewProjModule(emb_dim, mem_dim)

        # if getattr(self, "proj_module_master", None):  # share parameters
        #     for (tar_param, src_param) in zip(module.parameters(), self.proj_module_master.parameters()):
        #         tar_param.grad.data = src_param.grad.data.clone()

        return module
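
The forward pass above implements a multiplicative gate, sigmoid(linear1(x)) * tanh(linear2(x)), in the spirit of LSTM input gating. A hypothetical usage sketch, assuming the class above is in scope and with illustrative dimensions:

proj = NewProjModule(emb_dim=300, mem_dim=150)
emb = torch.randn(8, 300)   # a batch of 8 embedding vectors
out = proj(emb)             # gated projection, shape (8, 150)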
Project: pointGAN    Author: fxia22    | Project source | File source
def __init__(self, num_points = 2048):
        super(PointCodeGen, self).__init__()
        self.num_points = num_points
        self.fc1 = nn.Linear(100, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 64)
        self.fc4 = nn.Linear(64, (100) * 4)


        self.fc5 = nn.Linear(100, 64)
        self.fc6 = nn.Linear(64, 32)
        self.fc7 = nn.Linear(32, 64)
        self.fc8 = nn.Linear(64, (3) * 4)


        self.th = nn.Tanh()
Project: pointGAN    Author: fxia22    | Project source | File source
def __init__(self, num_points = 2048):
        super(PointGenPSG, self).__init__()
        self.num_points = num_points
        self.fc1 = nn.Linear(100, 256)
        self.fc2 = nn.Linear(256, 512)
        self.fc3 = nn.Linear(512, 1024)
        self.fc4 = nn.Linear(1024, self.num_points // 4 * 3)  # integer division: plain '/' yields a float on Python 3
        self.th = nn.Tanh()

        self.conv1 = nn.ConvTranspose2d(100, 1024, (2, 3))
        self.conv2 = nn.ConvTranspose2d(1024, 512, 4, 2, 1)
        self.conv3 = nn.ConvTranspose2d(512, 256, 4, 2, 1)
        self.conv4 = nn.ConvTranspose2d(256, 128, 4, 2, 1)
        self.conv5 = nn.ConvTranspose2d(128, 3, 4, 2, 1)

        self.bn1 = torch.nn.BatchNorm2d(1024)
        self.bn2 = torch.nn.BatchNorm2d(512)
        self.bn3 = torch.nn.BatchNorm2d(256)
        self.bn4 = torch.nn.BatchNorm2d(128)
        self.bn5 = torch.nn.BatchNorm2d(3)
Project: pointGAN    Author: fxia22    | Project source | File source
def __init__(self, num_points = 2048):
        super(PointCodeGen, self).__init__()
        self.num_points = num_points
        self.fc1 = nn.Linear(100, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 64)
        self.fc4 = nn.Linear(64, (100) * 4)


        self.fc5 = nn.Linear(100, 64)
        self.fc6 = nn.Linear(64, 32)
        self.fc7 = nn.Linear(32, 64)
        self.fc8 = nn.Linear(64, (3) * 4)


        self.th = nn.Tanh()
Project: generative_zoo    Author: DL-IT    | Project source | File source
def __init__(self, image_size, n_chan, n_hidden, n_z, ngpu):
        super(Decoder, self).__init__()

        assert image_size % 16 == 0, "Image size should be a multiple of 16"

        self.image_size = image_size
        self.n_chan = n_chan
        self.n_hidden   = n_hidden
        self.n_z    = n_z
        self.ngpu   = ngpu
        self.decoder    = nn.Sequential()

        decoder_layers  = []

        decoder_layers  = make_conv_layer(decoder_layers, n_z, n_hidden, back_conv=True, k_s_p=[4,1,0])
        cur_size    = 4
        while cur_size < image_size//2:
            decoder_layers  = make_conv_layer(decoder_layers, n_hidden, n_hidden//2, back_conv=True)
            n_hidden    = n_hidden//2
            cur_size    = cur_size*2
        decoder_layers  = make_conv_layer(decoder_layers, n_hidden, n_chan, back_conv=True, batch_norm=False, activation='Tanh')

        for i, layer in enumerate(decoder_layers):
            self.decoder.add_module('component_{0}'.format(i+1), layer)
Project: self-critical.pytorch    Author: ruotianluo    | Project source | File source
def __init__(self, opt):
        super(AdaAtt_attention, self).__init__()
        self.input_encoding_size = opt.input_encoding_size
        #self.rnn_type = opt.rnn_type
        self.rnn_size = opt.rnn_size
        self.drop_prob_lm = opt.drop_prob_lm
        self.att_hid_size = opt.att_hid_size

        # fake region embed
        self.fr_linear = nn.Sequential(
            nn.Linear(self.rnn_size, self.input_encoding_size),
            nn.ReLU(), 
            nn.Dropout(self.drop_prob_lm))
        self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)

        # h out embed
        self.ho_linear = nn.Sequential(
            nn.Linear(self.rnn_size, self.input_encoding_size),
            nn.Tanh(), 
            nn.Dropout(self.drop_prob_lm))
        self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)

        self.alpha_net = nn.Linear(self.att_hid_size, 1)
        self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
Project: examples    Author: pytorch    | Project source | File source
def __init__(self, ngpu):
        super(_netG, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(     nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2,     ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(    ngf,      nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )
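
With the usual globals of this DCGAN example (nz = 100 latent dimensions, ngf = 64 feature maps, nc = 3 channels), sampling looks roughly like the sketch below; the class's forward method (not shown here) simply runs self.main, optionally split across GPUs:

netG = _netG(ngpu=1)
z = torch.randn(16, nz, 1, 1)   # 16 latent vectors
fake = netG.main(z)             # (16, nc, 64, 64); the final Tanh bounds pixels to (-1, 1)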
Project: keita    Author: iwasaki-kenta    | Project source | File source
def __init__(self, num_classes=2, embed_dim=300, fc_dim=512, hidden_dim=512,
                 encoder=BidirectionalEncoder,
                 **encoder_params):
        super(LinearNet, self).__init__()

        self.encoder = encoder(embed_dim=embed_dim, hidden_dim=hidden_dim, **encoder_params)
        self.encoder_dim = encoder.get_output_size(hidden_dim)

        # Multiply by 2 for 2x hidden states in a bidirectional encoder.
        if "Bidirectional" in encoder.__class__.__name__:
            self.encoder_dim = self.encoder_dim * 2

        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(self.encoder_dim, fc_dim),
            nn.Tanh(),
            nn.Dropout(p=0.5),
            nn.Linear(fc_dim, num_classes),
        )
Project: MachineLearning    Author: timomernick    | Project source | File source
def __init__(self):
        super(Generator, self).__init__()
        self.main = nn.Sequential(
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2, ngf * 1, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 1, nc, 4, 2, 1, bias=False),
            nn.Tanh()
        )
        self.apply(weights_init)
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, betas=(beta_1, beta_2))
        #self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate, alpha=beta_2)
Project: MachineLearning    Author: timomernick    | Project source | File source
def __init__(self, sampler):
        super(Agent, self).__init__()

        self.sampler = sampler

        nc = 64
        self.conv0 = nn.Conv2d(self.sampler.num_memory_channels, nc, 3, stride=(1,1), padding=1, bias=False)
        self.conv1 = nn.Conv2d(nc, nc * 2, 3, stride=(1,2), padding=1, bias=False)
        self.conv2 = nn.Conv2d(nc * 2, nc * 4, 3, stride=(1,2), padding=1, bias=False)
        self.conv3 = nn.Conv2d(nc * 4, nc, 3, stride=(1,2), padding=1, bias=False)

        self.maxPool = nn.MaxPool2d(2)

        self.bn0 = nn.BatchNorm2d(nc * 1)
        self.bn1 = nn.BatchNorm2d(nc * 2)
        self.bn2 = nn.BatchNorm2d(nc * 4)
        self.bn3 = nn.BatchNorm2d(nc)

        self.fc0_size = 64 * 32 * 4
        self.fc0 = nn.Linear(self.fc0_size, sampler.num_future_channels * sampler.future_size, bias=False)

        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()

        self.apply(weights_init)
Project: MachineLearning    Author: timomernick    | Project source | File source
def __init__(self, mpii, batch_size):
        super(HeatmapModel, self).__init__()

        self.heatmap_size = mpii.heatmap_size

        ndf = 32
        self.conv0 = nn.Conv2d(mpii.image_num_components, ndf, 11, stride=2)
        self.conv1 = nn.Conv2d(ndf, ndf * 2, 9, stride=2)
        self.conv2 = nn.Conv2d(ndf * 2, ndf * 4, 7, stride=2)
        self.conv3 = nn.Conv2d(ndf * 4, ndf * 8, 5, stride=2)                

        self.fc0_size = 256 * 3 * 3
        self.fc0 = nn.Linear(self.fc0_size, mpii.heatmap_size * mpii.heatmap_size)        

        self.relu = nn.ReLU(inplace=True)
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()

        self.loss = nn.BCELoss().cuda()

        self.images = Variable(torch.FloatTensor(batch_size, mpii.image_num_components, mpii.image_size, mpii.image_size)).cuda()
        self.labels = Variable(torch.FloatTensor(batch_size, self.heatmap_size, self.heatmap_size)).cuda()
Project: CBEGAN    Author: taey16    | Project source | File source
def __init__(self, nc, ngf, hidden_size, condition=False, condition_size=0):
    super(Decoder, self).__init__()
    self.condition = condition

    self.decode_cond = nn.ConvTranspose2d(condition_size, ngf, kernel_size=8, stride=1, padding=0)
    # 1
    self.decode = nn.ConvTranspose2d(hidden_size, ngf, kernel_size=8, stride=1, padding=0)
    # 8
    self.dconv6 = deconv_block(ngf*2, ngf)
    # 16
    self.dconv5 = deconv_block(ngf, ngf)
    # 32
    self.dconv4 = deconv_block(ngf, ngf)
    # 64 
    self.dconv3 = deconv_block(ngf, ngf)
    # 128 
    #self.dconv2 = deconv_block(ngf, ngf)
    # 256
    self.dconv1 = nn.Sequential(nn.Conv2d(ngf, ngf, kernel_size=3, stride=1, padding=1),
                                nn.ELU(True),
                                nn.Conv2d(ngf, ngf, kernel_size=3, stride=1, padding=1),
                                nn.ELU(True),
                                nn.Conv2d(ngf, nc, kernel_size=3, stride=1, padding=1),
                                nn.Tanh())
Project: food-GAN    Author: rtlee9    | Project source | File source
def __init__(self, ngpu):
        super(_netG, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(     nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2,     ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(    ngf,      nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )
Project: food-GAN    Author: rtlee9    | Project source | File source
def __init__(self, ngpu):
        super(_netG, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(     nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2,     ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(    ngf,      nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )
Project: Machine-Learning    Author: hadikazemi    | Project source | File source
def __init__(self):
        super(Generator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(z_dim+10, 4*4*256),
            nn.LeakyReLU()
        )

        self.cnn = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 3, stride=2, padding=0, output_padding=0),
            nn.LeakyReLU(),
            nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=0),
            nn.LeakyReLU(),
            nn.ConvTranspose2d(64, 64, 3, stride=2, padding=2, output_padding=1),
            nn.LeakyReLU(),
            nn.Conv2d(64, 3, 3, stride=1, padding=1),
            nn.Tanh()
        )
Project: Machine-Learning    Author: hadikazemi    | Project source | File source
def __init__(self):
        super(Generator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(100, 4*4*256),
            nn.LeakyReLU()
        )

        self.cnn = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 3, stride=2, padding=0, output_padding=0),
            nn.LeakyReLU(),
            nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=0),
            nn.LeakyReLU(),
            nn.ConvTranspose2d(64, 64, 3, stride=2, padding=2, output_padding=1),
            nn.LeakyReLU(),
            nn.Conv2d(64, 3, 3, stride=1, padding=1),
            nn.Tanh()
        )
Project: MNIST-invert-color    Author: BlackBindy    | Project source | File source
def __init__(self):
        super(Generator, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, stride=2, kernel_size=4, padding=1), # 28*28 -> 14*14
            nn.BatchNorm2d(16),
            nn.LeakyReLU()
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 16, stride=1, kernel_size=3, padding=1), # 14*14 -> 14*14
            nn.BatchNorm2d(16),
            nn.LeakyReLU()
        )
        self.layer3 = nn.Sequential(
            nn.ConvTranspose2d(16, 1, stride=2, kernel_size=4, padding=1), # 14*14 -> 28*28
            nn.Tanh()
        )
Project: pytorch-cns    Author: awentzonline    | Project source | File source
def __init__(self):
        super(Encoder, self).__init__()
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, nz, 4, 1, 0, bias=False),
            nn.Tanh()
        )
Project: pytorch-cns    Author: awentzonline    | Project source | File source
def __init__(self):
        super(Decoder, self).__init__()
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(     nz, ngf * 8, 4, 1, 0, bias=False),
            #nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2,     ngf, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(    ngf,      nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )
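
This Decoder mirrors the Encoder above: the encoder's final Tanh bounds the nz-dimensional code to (-1, 1), and the decoder maps such a code back to an image. A hedged round-trip sketch, assuming the same globals (nc, ndf, ngf, nz) are defined:

enc, dec = Encoder(), Decoder()
img = torch.randn(4, nc, 64, 64)
code = enc.main(img)     # (4, nz, 1, 1), values in (-1, 1)
recon = dec.main(code)   # (4, nc, 64, 64)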
Project: pytorch-cns    Author: awentzonline    | Project source | File source
def __init__(self, ngpu):
        super(NetG, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(     nz, ngf * 8, 4, 1, 0, bias=False),
            #nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2,     ngf, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(    ngf,      nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )
Project: OpenNMT-py    Author: OpenNMT    | Project source | File source
def __init__(self, dim, coverage=False, attn_type="dot"):
        super(GlobalAttention, self).__init__()

        self.dim = dim
        self.attn_type = attn_type
        assert (self.attn_type in ["dot", "general", "mlp"]), (
                "Please select a valid attention type.")

        if self.attn_type == "general":
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == "mlp":
            self.linear_context = BottleLinear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = BottleLinear(dim, 1, bias=False)
        # mlp wants it with bias
        out_bias = self.attn_type == "mlp"
        self.linear_out = nn.Linear(dim*2, dim, bias=out_bias)

        self.sm = nn.Softmax()
        self.tanh = nn.Tanh()

        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False)
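
The three attn_type values are the familiar Luong-style score functions. The forward pass is not part of this snippet, but a condensed sketch of the scoring step, assuming the Bottle wrappers take care of the 3-D reshaping, would be:

def score(attn, h_t, h_s):
    # h_t: (batch, dim) query; h_s: (batch, src_len, dim) source states
    if attn.attn_type == "dot":
        return torch.bmm(h_s, h_t.unsqueeze(2)).squeeze(2)
    if attn.attn_type == "general":
        return torch.bmm(h_s, attn.linear_in(h_t).unsqueeze(2)).squeeze(2)
    # mlp: v . tanh(W_q h_t + W_c h_s)
    wq = attn.linear_query(h_t).unsqueeze(1)        # (batch, 1, dim)
    wc = attn.linear_context(h_s)                   # (batch, src_len, dim)
    return attn.v(attn.tanh(wq + wc)).squeeze(2)    # (batch, src_len)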
Project: Structured-Self-Attentive-Sentence-Embedding    Author: ExplorerFreda    | Project source | File source
def __init__(self, config):
        super(SelfAttentiveEncoder, self).__init__()
        self.bilstm = BiLSTM(config)
        self.drop = nn.Dropout(config['dropout'])
        self.ws1 = nn.Linear(config['nhid'] * 2, config['attention-unit'], bias=False)
        self.ws2 = nn.Linear(config['attention-unit'], config['attention-hops'], bias=False)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax()
        self.dictionary = config['dictionary']
#        self.init_weights()
        self.attention_hops = config['attention-hops']
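
These layers realize the paper's annotation matrix A = softmax(ws2 * tanh(ws1 * H^T)). The repo's forward reshapes everything to 2-D, but the quantity it computes is, in condensed form (as it would appear inside forward; shapes illustrative):

# H: (batch, seq_len, 2 * nhid) BiLSTM outputs
hbar = torch.tanh(self.ws1(self.drop(H)))                       # (batch, seq_len, attention-unit)
alphas = torch.softmax(self.ws2(hbar).transpose(1, 2), dim=2)   # (batch, hops, seq_len)
M = torch.bmm(alphas, H)                                        # (batch, hops, 2 * nhid)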
Project: Structured-Self-Attentive-Sentence-Embedding    Author: ExplorerFreda    | Project source | File source
def __init__(self, config):
        super(Classifier, self).__init__()
        if config['pooling'] == 'mean' or config['pooling'] == 'max':
            self.encoder = BiLSTM(config)
            self.fc = nn.Linear(config['nhid'] * 2, config['nfc'])
        elif config['pooling'] == 'all':
            self.encoder = SelfAttentiveEncoder(config)
            self.fc = nn.Linear(config['nhid'] * 2 * config['attention-hops'], config['nfc'])
        else:
            raise Exception('Error when initializing Classifier')
        self.drop = nn.Dropout(config['dropout'])
        self.tanh = nn.Tanh()
        self.pred = nn.Linear(config['nfc'], config['class-number'])
        self.dictionary = config['dictionary']
#        self.init_weights()
Project: DistanceGAN    Author: sagiebenaim    | Project source | File source
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[]):
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids

        model = [nn.Conv2d(input_nc, ngf, kernel_size=7, padding=3),
                 norm_layer(ngf, affine=True),
                 nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                stride=2, padding=1),
                      norm_layer(ngf * mult * 2, affine=True),
                      nn.ReLU(True)]

        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * mult, 'zero', norm_layer=norm_layer, use_dropout=use_dropout)]

        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1),
                      norm_layer(int(ngf * mult / 2), affine=True),
                      nn.ReLU(True)]

        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)
Project: DistanceGAN    Author: sagiebenaim    | Project source | File source
def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost

        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc, affine=True)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)
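
These blocks nest recursively into a U-Net: the innermost block is built first, each new block wraps the previous one as submodule, and only the outermost ends in Tanh. A hypothetical three-level construction (channel counts chosen so the shapes line up; the forward, not shown here, concatenates each skip connection):

inner = UnetSkipConnectionBlock(64, 128, innermost=True)
middle = UnetSkipConnectionBlock(32, 64, submodule=inner)
net = UnetSkipConnectionBlock(3, 32, submodule=middle, outermost=True)
out = net(torch.randn(1, 3, 64, 64))   # (1, 3, 64, 64), Tanh-bounded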
Project: postfilt_gan    Author: bajibabu    | Project source | File source
def __init__(self, in_ch):
        super(_netG, self).__init__()
        self.in_ch = in_ch

        # Convolutional 1
        self.conv1 = nn.Sequential(
            # input shape [batch_size x 2 (noise + input mel-cepstrum) x 40 (mgc dim) x T]
            nn.Conv2d(in_ch, 128, 5, stride=1, padding=2, bias=True),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True))

        # Convolutional 2
        # input shape [batch_size x 129 (128 feature maps + 1 input mel-cepstrum channel) x 40 x T]
        self.conv2 = nn.Sequential(
            nn.Conv2d(129, 256, 5, padding=2, bias=True),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True))

        # Convolutional 3
        # input shape [batch_size x 257 (256 feature maps + 1 input mel-cepstrum channel) x 40 x T]
        self.conv3 = nn.Sequential(
            nn.Conv2d(257, 128, 5, padding=2, bias=True),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True))

        # Convolutional 4
        # input shape [batch_size x 129 (128 feature maps + 1 input mel-cepstrum channel) x 40 x T]
        self.conv4 = nn.Sequential(
            nn.Conv2d(129, 1, 5, padding=2, bias=True),
            #nn.Tanh()
        )
        # final output shape [batch_size x 1 x 40 x T]
Project: SentEval    Author: facebookresearch    | Project source | File source
def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
                 seed=1111, cudaEfficient=False):
        super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
                                             batch_size, seed, cudaEfficient)
        """
        PARAMETERS:
        -nhid:       number of hidden units (0: Logistic Regression)
        -optim:      optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
        -tenacity:   how many times dev acc does not increase before stopping
        -epoch_size: each epoch corresponds to epoch_size passes over the train set
        -max_epoch:  max number of epochs
        -dropout:    dropout for MLP
        """

        self.nhid = 0 if "nhid" not in params else params["nhid"]
        self.optim = "adam" if "optim" not in params else params["optim"]
        self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
        self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"]
        self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
        self.dropout = 0. if "dropout" not in params else params["dropout"]
        self.batch_size = 64 if "batch_size" not in params else params["batch_size"]

        if params["nhid"] == 0:
            self.model = nn.Sequential(
                nn.Linear(self.inputdim, self.nclasses),
                ).cuda()
        else:
            self.model = nn.Sequential(
                nn.Linear(self.inputdim, params["nhid"]),
                nn.Dropout(p=self.dropout),
                nn.Tanh(),
                nn.Linear(params["nhid"], self.nclasses),
                ).cuda()

        self.loss_fn = nn.CrossEntropyLoss().cuda()
        self.loss_fn.size_average = False

        optim_fn, optim_params = utils.get_optimizer(self.optim)
        self.optimizer = optim_fn(self.model.parameters(), **optim_params)
        self.optimizer.param_groups[0]['weight_decay'] = self.l2reg
Project: DeepIllumination    Author: CreativeCodingLab    | Project source | File source
def __init__(self, n_channel_input, n_channel_output, n_filters):
        super(G, self).__init__()
        self.conv1 = nn.Conv2d(n_channel_input, n_filters, 4, 2, 1)
        self.conv2 = nn.Conv2d(n_filters, n_filters * 2, 4, 2, 1)
        self.conv3 = nn.Conv2d(n_filters * 2, n_filters * 4, 4, 2, 1)
        self.conv4 = nn.Conv2d(n_filters * 4, n_filters * 8, 4, 2, 1)
        self.conv5 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
        self.conv6 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
        self.conv7 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
        self.conv8 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)

        self.deconv1 = nn.ConvTranspose2d(n_filters * 8, n_filters * 8, 4, 2, 1)
        self.deconv2 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
        self.deconv3 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
        self.deconv4 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
        self.deconv5 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 4, 4, 2, 1)
        self.deconv6 = nn.ConvTranspose2d(n_filters * 4 * 2, n_filters * 2, 4, 2, 1)
        self.deconv7 = nn.ConvTranspose2d(n_filters * 2 * 2, n_filters, 4, 2, 1)
        self.deconv8 = nn.ConvTranspose2d(n_filters * 2, n_channel_output, 4, 2, 1)

        self.batch_norm = nn.BatchNorm2d(n_filters)
        self.batch_norm2 = nn.BatchNorm2d(n_filters * 2)
        self.batch_norm4 = nn.BatchNorm2d(n_filters * 4)
        self.batch_norm8 = nn.BatchNorm2d(n_filters * 8)

        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.relu = nn.ReLU(True)

        self.dropout = nn.Dropout(0.5)

        self.tanh = nn.Tanh()
Project: torch_light    Author: ne7ermore    | Project source | File source
def __init__(self, out_h, out_w, channel_dims, z_dim=100):
        super().__init__()

        assert len(channel_dims) == 4, "length of channel dims should be 4"

        conv1_dim, conv2_dim, conv3_dim, conv4_dim = channel_dims
        conv1_h, conv2_h, conv3_h, conv4_h = map(conv_size, [(out_h, step) for step in [4, 3, 2, 1]])
        conv1_w, conv2_w, conv3_w, conv4_w = map(conv_size, [(out_w, step) for step in [4, 3, 2, 1]])

        self.fc = nn.Linear(z_dim, conv1_dim*conv1_h*conv1_w)
        self.deconvs = nn.Sequential(
                nn.BatchNorm2d(conv1_dim),
                nn.ReLU(),

                nn.ConvTranspose2d(conv1_dim, conv2_dim, kernel_size=4, stride=2, padding=1, bias=False),
                nn.BatchNorm2d(conv2_dim),
                nn.ReLU(),

                nn.ConvTranspose2d(conv2_dim, conv3_dim, kernel_size=4, stride=2, padding=1, bias=False),
                nn.BatchNorm2d(conv3_dim),
                nn.ReLU(),  

                nn.ConvTranspose2d(conv3_dim, conv4_dim, kernel_size=4, stride=2, padding=1, bias=False),
                nn.BatchNorm2d(conv4_dim),
                nn.ReLU(),          

                nn.ConvTranspose2d(conv4_dim, 3, kernel_size=4, stride=2, padding=1, bias=False),   
                nn.Tanh(),                          
            )
        self.conv1_size = (conv1_dim, conv1_h, conv1_w)

        self._init_weight()
Project: ParlAI    Author: facebookresearch    | Project source | File source
def __init__(self, opt, data_agent):
        super().__init__()
        self.opt = opt

        self.input_emb = nn.Embedding(data_agent.wordcnt, opt['embedding_dim'], padding_idx=0)
        self.action_type_emb = nn.Embedding(data_agent.get_num_actions(), opt['action_type_emb_dim'])
        self.encoder = nn.GRU(opt['embedding_dim'], opt['rnn_h'], opt['rnn_layers'], batch_first=True, bidirectional=opt['bidir'])
        self.decoder = nn.Sequential(
            nn.Linear(opt['rnn_h'], 1),
        )
        self.log_softmax = nn.LogSoftmax()
        self.trans = nn.Sequential(
            nn.Linear(opt['rnn_h'] * (2 if opt['bidir'] else 1), opt['embedding_dim']),
            nn.Tanh(),
        )
        counter_emb = opt['counter_emb_dim']
        if opt['counter_ablation']:
            counter_emb = 0
        self.dec_gru = nn.GRU(opt['rnn_h'] * (2 if opt['bidir'] else 1) + counter_emb + (opt['embedding_dim'] if not opt['room_ablation'] else 0) + opt['action_type_emb_dim'] + opt['action_type_emb_dim'] + opt['embedding_dim'] + opt['embedding_dim'] + opt['rnn_h'] * (2 if opt['bidir'] else 1), opt['rnn_h'], opt['rnn_layers'], batch_first=True)
        self.merge = nn.Sequential(
            nn.Linear(opt['rnn_h'] * 2, opt['rnn_h']),
            nn.Tanh(),
        )
        self.counter_emb = nn.Embedding(opt['counter_max'] + 1, opt['counter_emb_dim'])
Project: pyro    Author: uber    | Project source | File source
def __init__(self, z_dim, rnn_dim):
        super(Combiner, self).__init__()
        # initialize the three linear transformations used in the neural network
        self.lin_z_to_hidden = nn.Linear(z_dim, rnn_dim)
        self.lin_hidden_to_mu = nn.Linear(rnn_dim, z_dim)
        self.lin_hidden_to_sigma = nn.Linear(rnn_dim, z_dim)
        # initialize the two non-linearities used in the neural network
        self.tanh = nn.Tanh()
        self.softplus = nn.Softplus()
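
In Pyro's deep Markov model this combiner merges the previous latent z_{t-1} with the RNN's summary of future observations to parameterize the variational posterior. A sketch of the forward pass reconstructed from the layer names (the exact code may differ):

def forward(self, z_t_1, h_rnn):
    # average a nonlinear map of z_{t-1} with the RNN hidden state
    h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(z_t_1)) + h_rnn)
    mu = self.lin_hidden_to_mu(h_combined)                        # posterior mean
    sigma = self.softplus(self.lin_hidden_to_sigma(h_combined))   # positive scale
    return mu, sigma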
Project: MMD-GAN    Author: OctoberChang    | Project source | File source
def __init__(self, isize, nc, k=100, ngf=64):
        super(Decoder, self).__init__()
        assert isize % 16 == 0, "isize has to be a multiple of 16"

        cngf, tisize = ngf // 2, 4
        while tisize != isize:
            cngf = cngf * 2
            tisize = tisize * 2

        main = nn.Sequential()
        main.add_module('initial.{0}-{1}.convt'.format(k, cngf), nn.ConvTranspose2d(k, cngf, 4, 1, 0, bias=False))
        main.add_module('initial.{0}.batchnorm'.format(cngf), nn.BatchNorm2d(cngf))
        main.add_module('initial.{0}.relu'.format(cngf), nn.ReLU(True))

        csize = 4
        while csize < isize // 2:
            main.add_module('pyramid.{0}-{1}.convt'.format(cngf, cngf // 2),
                            nn.ConvTranspose2d(cngf, cngf // 2, 4, 2, 1, bias=False))
            main.add_module('pyramid.{0}.batchnorm'.format(cngf // 2),
                            nn.BatchNorm2d(cngf // 2))
            main.add_module('pyramid.{0}.relu'.format(cngf // 2),
                            nn.ReLU(True))
            cngf = cngf // 2
            csize = csize * 2

        main.add_module('final.{0}-{1}.convt'.format(cngf, nc), nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
        main.add_module('final.{0}.tanh'.format(nc),
                        nn.Tanh())

        self.main = main
Project: bandit-nmt    Author: khanhptnk    | Project source | File source
def __init__(self, dim):
        super(GlobalAttention, self).__init__()
        self.linear_in = nn.Linear(dim, dim, bias=False)
        self.sm = nn.Softmax()
        self.linear_out = nn.Linear(dim*2, dim, bias=False)
        self.tanh = nn.Tanh()
        self.mask = None
Project: GlottGAN    Author: bajibabu    | Project source | File source
def __init__(self, noise_input_size, cond_input_size):
        super(_netG, self).__init__()
        self.noise_input_size = noise_input_size
        self.cond_input_size = cond_input_size

        # first dense block
        # input shape [batch_size x 147]
        self.fc1 = nn.Sequential(
            nn.Linear(self.noise_input_size + self.cond_input_size, 100 * 10),
            nn.BatchNorm1d(100 * 10),
            nn.LeakyReLU(0.2, inplace=True)
        )
        # Convolutional block
        self.conv1 = nn.Sequential(
            # input shape [batch_size x 10 x 100]
            nn.ConvTranspose1d(10, 250, 13, stride=2, padding=6,
                              output_padding=1, bias=True),
            nn.BatchNorm1d(250),
            nn.LeakyReLU(0.2, inplace=True),

            # input shape [batch_size x 250 x 200]
            nn.ConvTranspose1d(250, 100, 13, stride=2, padding=6,
                              output_padding=1, bias=True),
            nn.BatchNorm1d(100),
            nn.LeakyReLU(0.2, inplace=True),

             # input shape [batch_size x 100 x 400]
            nn.ConvTranspose1d(100, 1, 13, stride=1, padding=6,
                              bias=True),
            nn.BatchNorm1d(1),
            # input shape [batch_size x 1 x 400]
            nn.Tanh()
        )
Project: end-to-end-negotiator    Author: facebookresearch    | Project source | File source
def __init__(self, n, k, nembed, nhid, init_range, device_id):
        super(MlpContextEncoder, self).__init__(device_id)

        # create separate embedding for counts and values
        self.cnt_enc = nn.Embedding(n, nembed)
        self.val_enc = nn.Embedding(n, nembed)

        self.encoder = nn.Sequential(
            nn.Tanh(),
            nn.Linear(k * nembed, nhid)
        )

        self.cnt_enc.weight.data.uniform_(-init_range, init_range)
        self.val_enc.weight.data.uniform_(-init_range, init_range)
        init_cont(self.encoder, init_range)
Project: NeuralMT    Author: hlt-mt    | Project source | File source
def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(SourceContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()
Project: NeuralMT    Author: hlt-mt    | Project source | File source
def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(TargetContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()
Project: NeuralMT    Author: hlt-mt    | Project source | File source
def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(BothContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()
Project: NeuralMT    Author: hlt-mt    | Project source | File source
def __init__(self, dim):
        super(GlobalAttention, self).__init__()
        self.linear_in = nn.Linear(dim, dim, bias=False)
        self.sm = nn.Softmax()
        self.linear_out = nn.Linear(dim*2, dim, bias=False)
        self.tanh = nn.Tanh()
        self.mask = None
Project: alpha-dimt-icmlws    Author: sotetsuk    | Project source | File source
def __init__(self, dim):
        super(GlobalAttention, self).__init__()
        self.linear_in = nn.Linear(dim, dim, bias=False)
        self.sm = nn.Softmax()
        self.linear_out = nn.Linear(dim*2, dim, bias=False)
        self.tanh = nn.Tanh()
        self.mask = None
Project: GAN-Zoo    Author: corenel    | Project source | File source
def __init__(self, input_size, hidden_size, output_size):
        """Init for Generator model."""
        super(Generator, self).__init__()
        self.layer = nn.Sequential(nn.Linear(input_size, hidden_size),
                                   nn.LeakyReLU(0.2),
                                   nn.Linear(hidden_size, hidden_size),
                                   nn.LeakyReLU(0.2),
                                   nn.Linear(hidden_size, output_size),
                                   nn.Tanh())
Project: GAN-Zoo    Author: corenel    | Project source | File source
def __init__(self, num_channels, z_dim, conv_dim, num_gpu):
        """Init for Generator model."""
        super(Generator, self).__init__()
        self.num_gpu = num_gpu
        self.layer = nn.Sequential(
            # 1st deconv layer, input Z, output (conv_dim*8) x 4 x 4
            nn.ConvTranspose2d(z_dim, conv_dim * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(conv_dim * 8),
            nn.ReLU(True),
            # 2nd deconv layer, output (conv_dim*4) x 8 x 8
            nn.ConvTranspose2d(conv_dim * 8, conv_dim * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(conv_dim * 4),
            nn.ReLU(True),
            # 3rd deconv layer, output (conv_dim*2) x 16 x 16
            nn.ConvTranspose2d(conv_dim * 4, conv_dim * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(conv_dim * 2),
            nn.ReLU(True),
            # 4th deconv layer, output (conv_dim) x 32 x 32
            nn.ConvTranspose2d(conv_dim * 2, conv_dim, 4, 2, 1, bias=False),
            nn.BatchNorm2d(conv_dim),
            nn.ReLU(True),
            # output layer, output (num_channels) x 64 x 64
            nn.ConvTranspose2d(conv_dim, num_channels, 4, 2, 1, bias=False),
            nn.Tanh(),
        )
Project: pytorch    Author: tylergenter    | Project source | File source
def test_ListModule(self):
        modules = [nn.ReLU(), nn.Linear(5, 5)]
        module_list = nn.ModuleList(modules)

        def check():
            self.assertEqual(len(module_list), len(modules))
            for m1, m2 in zip(modules, module_list):
                self.assertIs(m1, m2)
            for m1, m2 in zip(modules, module_list.children()):
                self.assertIs(m1, m2)
            for i in range(len(modules)):
                self.assertIs(module_list[i], modules[i])

        check()
        modules += [nn.Conv2d(3, 4, 3)]
        module_list += [modules[-1]]
        check()
        modules.append(nn.Tanh())
        module_list.append(modules[-1])
        check()
        next_modules = [nn.Linear(5, 5), nn.Sigmoid()]
        modules.extend(next_modules)
        module_list.extend(next_modules)
        check()
        modules[2] = nn.Conv2d(5, 3, 2)
        module_list[2] = modules[2]
        check()

        with self.assertRaises(TypeError):
            module_list += nn.ReLU()
        with self.assertRaises(TypeError):
            module_list.extend(nn.ReLU())
Project: covfefe    Author: deepnn    | Project source | File source
def tanh():
    return nn.Tanh()
Project: lr-gan.pytorch    Author: jwyang    | Project source | File source
def buildEncoderFC(self, depth_in, nsize_in, out_dim):
        net = nn.Sequential(
            nn.Linear(depth_in * nsize_in * nsize_in, out_dim),
            nn.BatchNorm1d(out_dim),
            nn.Tanh()
        )
        return net
Project: Deep-learning-with-cats    Author: AlexiaJM    | Project source | File source
def __init__(self):
        super(CycleGAN_G, self).__init__()
        ### Downsample block
        ## Reflection padding is an alternative to 0 padding (like looking at water reflection)
        # n_colors x image_size x image_size
        model = [nn.ReflectionPad2d(padding=3),
                nn.Conv2d(param.n_colors, param.G_h_size, kernel_size=7, stride=1, padding=0),
                Norm2D(param.G_h_size),
                nn.ReLU(True)]
        # param.G_h_size x image_size x image_size
        model += [nn.Conv2d(param.G_h_size, param.G_h_size * 2, kernel_size=3, stride=2, padding=1),
                Norm2D(param.G_h_size * 2),
                nn.ReLU(True)]
        # (param.G_h_size * 2) x (image_size / 2) x (image_size / 2)
        model += [nn.Conv2d(param.G_h_size * 2, param.G_h_size * 4, kernel_size=3, stride=2, padding=1),
                Norm2D(param.G_h_size * 4),
                nn.ReLU(True)]
        # (param.G_h_size * 4) x (image_size / 4) x (image_size / 4)

        ### Residual blocks
        for i in range(param.G_residual_blocks):
            model += [Residual_block(h_size=param.G_h_size * 4)]

        ### Upsample block (pretty much inverse of downsample)
        # (param.G_h_size * 4) x (image_size / 4) x (image_size / 4)
        model += [nn.ConvTranspose2d(param.G_h_size * 4, param.G_h_size * 2, kernel_size=3, stride=2, padding=1, output_padding=1),
                Norm2D(param.G_h_size * 2),
                nn.ReLU(True)]
        # (param.G_h_size * 2) x (image_size / 2) x (image_size / 2)
        model += [nn.ConvTranspose2d(param.G_h_size * 2, param.G_h_size, kernel_size=3, stride=2, padding=1, output_padding=1),
                Norm2D(param.G_h_size),
                nn.ReLU(True)]
        # param.G_h_size x image_size x image_size
        model += [nn.ReflectionPad2d(padding=3),
                nn.Conv2d(param.G_h_size, param.n_colors, kernel_size=7, stride=1, padding=0),
                nn.Tanh()]
        # Size = n_colors x image_size x image_size
        self.model = nn.Sequential(*model)
Project: Seq2Seq-PyTorch    Author: MaximumEntropy    | Project source | File source
def get_init_state_decoder(self, input):
        """Get init state for decoder."""
        decoder_init_state = nn.Tanh()(self.model.encoder2decoder(input))
        return decoder_init_state
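
The input here is typically the encoder's final hidden state, so decoding starts from a tanh-squashed linear projection of it. A hypothetical call, with seq2seq, batch_size, and encoder_hidden_dim standing in for names not shown in the snippet:

h_T = torch.randn(batch_size, encoder_hidden_dim)   # final encoder state
h0_dec = seq2seq.get_init_state_decoder(h_T)        # bounded to (-1, 1) by Tanh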