Python torch.nn module: Sigmoid() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.Sigmoid().
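
Before the project snippets, here is a minimal self-contained sketch (ours, not taken from any listed project) of the two interchangeable ways to apply a sigmoid: as an nn.Sigmoid module, which composes inside nn.Sequential, or as the functional torch.sigmoid:

import torch
import torch.nn as nn

x = torch.randn(4, 3)        # a batch of arbitrary logits
m = nn.Sigmoid()             # module form: stateless, composable in nn.Sequential
y1 = m(x)
y2 = torch.sigmoid(x)        # functional form, identical result
assert torch.equal(y1, y2)   # every output lies in (0, 1)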

Project: e2c-pytorch    Author: ethanluoyc    | project source | file source
def __init__(self, dim_in, dim_z, config='pendulum'):
        super(AE, self).__init__()
        _, _, dec = load_config(config)

        # TODO, refactor encoder to allow output of dim_z instead of dim_z * 2
        self.encoder = nn.Sequential(
            nn.Linear(dim_in, 800),
            nn.BatchNorm1d(800),
            nn.ReLU(),
            nn.Linear(800, 800),
            nn.BatchNorm1d(800),
            nn.ReLU(),
            nn.Linear(800, dim_z),
            nn.BatchNorm1d(dim_z),
            nn.Sigmoid()
        )

        self.decoder = dec(dim_z, dim_in)
Project: pyro    Author: uber    | project source | file source
def __init__(self, z_dim, transition_dim):
        super(GatedTransition, self).__init__()
        # initialize the six linear transformations used in the neural network
        self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim)
        self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim)
        self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim)
        self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim)
        self.lin_sig = nn.Linear(z_dim, z_dim)
        self.lin_z_to_mu = nn.Linear(z_dim, z_dim)
        # modify the default initialization of lin_z_to_mu
        # so that it starts out as the identity function
        self.lin_z_to_mu.weight.data = torch.eye(z_dim)
        self.lin_z_to_mu.bias.data = torch.zeros(z_dim)
        # initialize the three non-linearities used in the neural network
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.softplus = nn.Softplus()
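
The constructor only wires the layers; the matching forward pass in pyro's deep Markov model combines them roughly as below (a reconstruction from the layer names above, so details may differ from the repository):

def forward(self, z_t_1):
    # gate in (0, 1) decides how much of the proposed mean to use
    _gate = self.relu(self.lin_gate_z_to_hidden(z_t_1))
    gate = self.sigmoid(self.lin_gate_hidden_to_z(_gate))
    # nonlinear proposed mean for the next latent state
    _proposed_mean = self.relu(self.lin_proposed_mean_z_to_hidden(z_t_1))
    proposed_mean = self.lin_proposed_mean_hidden_to_z(_proposed_mean)
    # interpolate between the (identity-initialised) linear mean and the proposal
    mu = (1 - gate) * self.lin_z_to_mu(z_t_1) + gate * proposed_mean
    sigma = self.softplus(self.lin_sig(self.relu(proposed_mean)))
    return mu, sigma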
Project: SeqMatchSeq    Author: pcgreat    | project source | file source
def new_proj_module(self):
        emb_dim = self.emb_dim
        mem_dim = self.mem_dim

        class NewProjModule(nn.Module):
            def __init__(self, emb_dim, mem_dim):
                super(NewProjModule, self).__init__()
                self.emb_dim = emb_dim
                self.mem_dim = mem_dim
                self.linear1 = nn.Linear(self.emb_dim, self.mem_dim)
                self.linear2 = nn.Linear(self.emb_dim, self.mem_dim)

            def forward(self, input):
                i = nn.Sigmoid()(self.linear1(input))
                u = nn.Tanh()(self.linear2(input))
                out = i.mul(u)  # CMulTable().updateOutput([i, u])
                return out

        module = NewProjModule(emb_dim, mem_dim)

        # if getattr(self, "proj_module_master", None):  # share parameters
        #     for (tar_param, src_param) in zip(module.parameters(), self.proj_module_master.parameters()):
        #         tar_param.grad.data = src_param.grad.data.clone()

        return module
Project: GAN-Zoo    Author: corenel    | project source | file source
def __init__(self, num_channels, conv_dim, num_gpu):
        """Init for Discriminator model."""
        super(Discriminator, self).__init__()
        self.num_gpu = num_gpu
        self.layer = nn.Sequential(
            # 1st conv layer
            # input num_channels x 64 x 64, output conv_dim x 32 x 32
            nn.Conv2d(num_channels, conv_dim, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # 2nd conv layer, output (conv_dim*2) x 16 x 16
            nn.Conv2d(conv_dim, conv_dim * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(conv_dim * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # 3rd conv layer, output (conv_dim*4) x 8 x 8
            nn.Conv2d(conv_dim * 2, conv_dim * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(conv_dim * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # 4th conv layer, output (conv_dim*8) x 4 x 4
            nn.Conv2d(conv_dim * 4, conv_dim * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(conv_dim * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # output layer
            nn.Conv2d(conv_dim * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )
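
A hypothetical smoke test for the block above (the constructor arguments num_channels=3 and conv_dim=64 are illustrative), showing that the final nn.Sigmoid() turns the 4x4 feature map into one probability per image:

import torch

D = Discriminator(num_channels=3, conv_dim=64, num_gpu=0)
fake = torch.randn(8, 3, 64, 64)   # batch of 64x64 RGB images
out = D.layer(fake)                # shape (8, 1, 1, 1)
prob = out.view(-1)                # eight values, each in (0, 1)
print(prob.shape, prob.min().item(), prob.max().item())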
Project: lr-gan.pytorch    Author: jwyang    | project source | file source
def buildNet(self, nsize):
        net = nn.Sequential()
        depth_in = nc
        depth_out = ndf
        size_map = nsize
        while size_map > 4:
            name = str(size_map)
            net.add_module('conv' + name, nn.Conv2d(depth_in, depth_out, 4, 2, 1, bias=False))
            if size_map < nsize:
                net.add_module('bn' + name, nn.BatchNorm2d(depth_out))
            net.add_module('lrelu' + name, nn.LeakyReLU(0.2, inplace=True))
            depth_in = depth_out
            depth_out = 2 * depth_in
            size_map = size_map // 2  # integer division keeps the module-name suffix an int in Python 3
        name = str(size_map)
        net.add_module('conv' + name, nn.Conv2d(depth_in, 1, 4, 1, 0, bias=False))
        net.add_module('sigmoid' + name, nn.Sigmoid())
        return net
Project: pytorch    Author: ezyang    | project source | file source
def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss(self):
        sigmoid = nn.Sigmoid()

        target = Variable(torch.rand(64, 4))
        output = Variable(torch.rand(64, 4) - 0.5)

        self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))

        weight = torch.rand(4)
        self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))

        target = Variable(torch.FloatTensor(4, 1).fill_(0))
        output = Variable(torch.FloatTensor(4, 1).fill_(-100))

        self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))

        weight = torch.FloatTensor(1).uniform_()
        self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))
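
The test asserts numerical equivalence of the fused and two-step losses; in practice nn.BCEWithLogitsLoss is preferred because it applies the log-sum-exp trick internally, while a separate sigmoid saturates for large logits. A quick sketch of the extreme case:

import torch
import torch.nn as nn

logits = torch.tensor([[100.0], [-100.0]])
target = torch.tensor([[0.0], [1.0]])

fused = nn.BCEWithLogitsLoss()(logits, target)
# torch.sigmoid(100.) rounds to exactly 1.0 in float32, so the two-step
# path only stays finite because BCELoss clamps its internal log() terms
twostep = nn.BCELoss()(torch.sigmoid(logits), target)
print(fused.item(), twostep.item())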
Project: pytorch    Author: ezyang    | project source | file source
def test_bce_loss_broadcasts_weights(self):
        sigmoid = nn.Sigmoid()
        target = Variable(torch.rand(16, 4))
        output = Variable(torch.rand(16, 4) - 0.5)

        weight = torch.rand(4)
        out1 = nn.BCELoss(weight)(sigmoid(output), target)

        weight = weight.expand(16, 4).contiguous()
        out2 = nn.BCELoss(weight)(sigmoid(output), target)

        self.assertEqual(out1, out2)

        weight = torch.rand(16, 1)
        out1 = nn.BCELoss(weight)(sigmoid(output), target)

        weight = weight.expand(16, 4).contiguous()
        out2 = nn.BCELoss(weight)(sigmoid(output), target)

        self.assertEqual(out1, out2)
Project: generative_zoo    Author: DL-IT    | project source | file source
def __init__(self, n_z, n_hidden, depth, ngpu):
        super(Code_Discriminator, self).__init__()

        self.n_z    = n_z
        self.ngpu   = ngpu
        main        = nn.Sequential()
        layer       = 1

        # Map the n_z vector (a prior sample or image encoding) through an MLP, as described in the paper

        main.add_module('linear_{0}-{1}-{2}'.format(layer, n_z, n_hidden), nn.Linear(n_z, n_hidden))
        main.add_module('batchnorm_{0}-{1}'.format(layer, n_hidden), nn.BatchNorm1d(n_hidden))
        main.add_module('LeakyReLU_{0}'.format(layer), nn.LeakyReLU(0.2, inplace=True))

        for layer in range(2, depth):
            main.add_module('linear_{0}-{1}-{2}'.format(layer, n_hidden, n_hidden), nn.Linear(n_hidden, n_hidden))
            main.add_module('batchnorm_{0}-{1}'.format(layer, n_hidden), nn.BatchNorm1d(n_hidden))
            main.add_module('LeakyReLU_{0}'.format(layer), nn.LeakyReLU(0.2, inplace=True))

        layer       = layer + 1
        main.add_module('linear_{0}-{1}-{2}'.format(layer, n_hidden, 1), nn.Linear(n_hidden, 1))
        main.add_module('Sigmoid_{0}'.format(layer), nn.Sigmoid())

        self.code_dis   = main
Project: generative_zoo    Author: DL-IT    | project source | file source
def make_conv_layer(layer_list, in_dim, out_dim, back_conv, batch_norm=True, activation='ReLU', k_s_p=[4,2,1]):
    k, s, p = k_s_p
    if not back_conv:
        layer_list.append(nn.Conv2d(in_dim, out_dim, kernel_size=k, stride=s, padding=p, bias=False))
    else:
        layer_list.append(nn.ConvTranspose2d(in_dim, out_dim, kernel_size=k, stride=s, padding=p, bias=False))

    if batch_norm:
        layer_list.append(nn.BatchNorm2d(out_dim))

    if activation == 'ReLU':
        layer_list.append(nn.ReLU(True))
    elif activation == 'Sigmoid':
        layer_list.append(nn.Sigmoid())
    elif activation == 'Tanh':
        layer_list.append(nn.Tanh())
    elif activation == 'LeakyReLU':
        layer_list.append(nn.LeakyReLU(0.2, inplace=True))

    return layer_list
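
A hypothetical use of the helper, stacking calls into a small DCGAN-style discriminator (all dimensions are illustrative):

import torch.nn as nn

layers = []
layers = make_conv_layer(layers, 3, 64, back_conv=False, batch_norm=False,
                         activation='LeakyReLU')              # 64x64 -> 32x32
layers = make_conv_layer(layers, 64, 128, back_conv=False,
                         activation='LeakyReLU')              # 32x32 -> 16x16
layers = make_conv_layer(layers, 128, 1, back_conv=False, batch_norm=False,
                         activation='Sigmoid', k_s_p=[16, 1, 0])  # 16x16 -> 1x1 probability
netD = nn.Sequential(*layers)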
Project: pytorch-CycleGAN-and-pix2pix    Author: junyanz    | project source | file source
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(PixelDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        self.net = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]

        if use_sigmoid:
            self.net.append(nn.Sigmoid())

        self.net = nn.Sequential(*self.net)
Project: examples    Author: pytorch    | project source | file source
def __init__(self, ngpu):
        super(_netD, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )
Project: pytorch-reverse-gan    Author: yxlao    | project source | file source
def __init__(self, ngpu, ndf, nc):
        super(NetD, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64: [64, 3, 64, 64]
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32: [64, 64, 32, 32]
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16: [64, 128, 16, 16]
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8: [64, 256, 8, 8]
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4: [64, 512, 4, 4]
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
            # output size: [64, 1, 1, 1]
        )
Project: pytorch    Author: pytorch    | project source | file source
def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss(self):
        sigmoid = nn.Sigmoid()

        target = Variable(torch.rand(64, 4))
        output = Variable(torch.rand(64, 4) - 0.5)

        self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))

        weight = torch.rand(4)
        self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))

        target = Variable(torch.FloatTensor(4, 1).fill_(0))
        output = Variable(torch.FloatTensor(4, 1).fill_(-100))

        self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))

        weight = torch.FloatTensor(1).uniform_()
        self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))
Project: pytorch    Author: pytorch    | project source | file source
def test_bce_loss_broadcasts_weights(self):
        sigmoid = nn.Sigmoid()
        target = Variable(torch.rand(16, 4))
        output = Variable(torch.rand(16, 4) - 0.5)

        weight = torch.rand(4)
        out1 = nn.BCELoss(weight)(sigmoid(output), target)

        weight = weight.expand(16, 4).contiguous()
        out2 = nn.BCELoss(weight)(sigmoid(output), target)

        self.assertEqual(out1, out2)

        weight = torch.rand(16, 1)
        out1 = nn.BCELoss(weight)(sigmoid(output), target)

        weight = weight.expand(16, 4).contiguous()
        out2 = nn.BCELoss(weight)(sigmoid(output), target)

        self.assertEqual(out1, out2)
Project: pix2pix-pytorch    Author: 1zb    | project source | file source
def __init__(self, input_nc, target_nc, ndf):
        super(_netD, self).__init__()
        self.main = nn.Sequential(
            # input is (nc * 2) x 64 x 64
            nn.Conv2d(input_nc + target_nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 1, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 1, bias=False),
            nn.Sigmoid()
        )
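
Because the first convolution takes input_nc + target_nc channels, this discriminator scores an (input, target) image pair concatenated along the channel axis; a hypothetical call (argument values are illustrative):

import torch

netD = _netD(input_nc=3, target_nc=3, ndf=64)
input_img = torch.randn(2, 3, 64, 64)
target_img = torch.randn(2, 3, 64, 64)
pair = torch.cat([input_img, target_img], dim=1)  # (2, 6, 64, 64)
score = netD.main(pair)  # (2, 1, 6, 6): a PatchGAN-style grid of probabilities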
Project: MachineLearning    Author: timomernick    | project source | file source
def __init__(self):
        super(Discriminator, self).__init__()
        self.main = nn.Sequential(
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )
        self.apply(weights_init)
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, betas=(beta_1, beta_2))
        #self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate, alpha=beta_2)
Project: MachineLearning    Author: timomernick    | project source | file source
def __init__(self, sampler):
        super(Agent, self).__init__()

        self.sampler = sampler

        nc = 64
        self.conv0 = nn.Conv2d(self.sampler.num_memory_channels, nc, 3, stride=(1,1), padding=1, bias=False)
        self.conv1 = nn.Conv2d(nc, nc * 2, 3, stride=(1,2), padding=1, bias=False)
        self.conv2 = nn.Conv2d(nc * 2, nc * 4, 3, stride=(1,2), padding=1, bias=False)
        self.conv3 = nn.Conv2d(nc * 4, nc, 3, stride=(1,2), padding=1, bias=False)

        self.maxPool = nn.MaxPool2d(2)

        self.bn0 = nn.BatchNorm2d(nc * 1)
        self.bn1 = nn.BatchNorm2d(nc * 2)
        self.bn2 = nn.BatchNorm2d(nc * 4)
        self.bn3 = nn.BatchNorm2d(nc)

        self.fc0_size = 64 * 32 * 4
        self.fc0 = nn.Linear(self.fc0_size, sampler.num_future_channels * sampler.future_size, bias=False)

        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()

        self.apply(weights_init)
Project: MachineLearning    Author: timomernick    | project source | file source
def __init__(self, mpii, batch_size):
        super(HeatmapModel, self).__init__()

        self.heatmap_size = mpii.heatmap_size

        ndf = 32
        self.conv0 = nn.Conv2d(mpii.image_num_components, ndf, 11, stride=2)
        self.conv1 = nn.Conv2d(ndf, ndf * 2, 9, stride=2)
        self.conv2 = nn.Conv2d(ndf * 2, ndf * 4, 7, stride=2)
        self.conv3 = nn.Conv2d(ndf * 4, ndf * 8, 5, stride=2)                

        self.fc0_size = 256 * 3 * 3
        self.fc0 = nn.Linear(self.fc0_size, mpii.heatmap_size * mpii.heatmap_size)        

        self.relu = nn.ReLU(inplace=True)
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()

        self.loss = nn.BCELoss().cuda()

        self.images = Variable(torch.FloatTensor(batch_size, mpii.image_num_components, mpii.image_size, mpii.image_size)).cuda()
        self.labels = Variable(torch.FloatTensor(batch_size, self.heatmap_size, self.heatmap_size)).cuda()
Project: jamespy_py3    Author: jskDr    | project source | file source
def __init__(self, ngpu):
        super(_netG, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # b, nz, 1, 1
            nn.ConvTranspose2d(nz, 28 * 28, 1, stride=1, padding=0, bias=False),
            # b, 28*28, 1, 1
            nn.BatchNorm2d(28 * 28),
            nn.ReLU(True),
            nn.ConvTranspose2d(28 * 28, 14 * 14, 2, stride=2, padding=0, bias=False),
            # b, 14*14, 2, 2
            nn.BatchNorm2d(14 * 14),
            nn.ReLU(True),
            nn.ConvTranspose2d(14 * 14, 7 * 7, 2, stride=2, padding=0, bias=False),
            # b, 7*7, 4, 4
            nn.BatchNorm2d(7 * 7),
            nn.ReLU(True),
            nn.ConvTranspose2d(7 * 7, 1, 7, stride=7, padding=0, bias=False),
            # b. 1, 28, 28
            nn.Sigmoid()
        )
Project: infoGAN-pytorch    Author: pianomania    | project source | file source
def __init__(self):
    super(G, self).__init__()

    self.main = nn.Sequential(
      nn.ConvTranspose2d(74, 1024, 1, 1, bias=False),
      nn.BatchNorm2d(1024),
      nn.ReLU(True),
      nn.ConvTranspose2d(1024, 128, 7, 1, bias=False),
      nn.BatchNorm2d(128),
      nn.ReLU(True),
      nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
      nn.BatchNorm2d(64),
      nn.ReLU(True),
      nn.ConvTranspose2d(64, 1, 4, 2, 1, bias=False),
      nn.Sigmoid()
    )
Project: draw_pytorch    Author: chenzhaomin123    | project source | file source
def __init__(self,T,A,B,z_size,N,dec_size,enc_size):
        super(DrawModel,self).__init__()
        self.T = T
        # self.batch_size = batch_size
        self.A = A
        self.B = B
        self.z_size = z_size
        self.N = N
        self.dec_size = dec_size
        self.enc_size = enc_size
        self.cs = [0] * T
        self.logsigmas,self.sigmas,self.mus = [0] * T,[0] * T,[0] * T

        self.encoder = nn.LSTMCell(2 * N * N + dec_size, enc_size)
        self.encoder_gru = nn.GRUCell(2 * N * N + dec_size, enc_size)
        self.mu_linear = nn.Linear(dec_size, z_size)
        self.sigma_linear = nn.Linear(dec_size, z_size)

        self.decoder = nn.LSTMCell(z_size,dec_size)
        self.decoder_gru = nn.GRUCell(z_size,dec_size)
        self.dec_linear = nn.Linear(dec_size,5)
        self.dec_w_linear = nn.Linear(dec_size,N*N)

        self.sigmoid = nn.Sigmoid()
Project: food-GAN    Author: rtlee9    | project source | file source
def __init__(self, ngpu):
        super(_netD, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )
Project: Machine-Learning    Author: hadikazemi    | project source | file source
def __init__(self):
        super(Discriminator, self).__init__()
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 64, 3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 128, 3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(128, 256, 3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2)
        )
        self.fc = nn.Sequential(
            nn.Linear(4*4*256, 128),
            nn.LeakyReLU(),
            nn.Dropout(0.5),
            nn.Linear(128, 1),
            nn.Sigmoid()
        )
Project: Machine-Learning    Author: hadikazemi    | project source | file source
def __init__(self):
        super(Discriminator, self).__init__()
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 64, 3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 128, 3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(128, 256, 3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2)
        )
        self.fc = nn.Sequential(
            nn.Linear(4*4*256, 128),
            nn.LeakyReLU(),
            nn.Dropout(0.5),
            nn.Linear(128, 1),
            nn.Sigmoid()
        )
Project: LSH_Memory    Author: RUSH-LAB    | project source | file source
def __init__(self, input_shape):
        super(Net, self).__init__()
        ch, row, col = input_shape
        kernel = 3
        pad = int((kernel-1)/2.0)

        self.predict = nn.Linear(128, 2)

        self.convolution = nn.Sequential(
            nn.Conv2d(ch, 64, kernel, padding=pad),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel, padding=pad),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 128, kernel, padding=pad),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel, padding=pad),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2,2)
        )

        self.fc = nn.Sequential(
            nn.Linear(row // 4 * col // 4 * 128, 128),
            nn.Sigmoid()
        )
Project: Medical-named-entity-recognition-for-ccks2017    Author: fangwater    | project source | file source
def _get_labelwise_loss(self, feats, tags):
        '''
        Training Conditional Random Fields for Maximum Labelwise Accuracy
        '''
        # Get the marginal distribution
        score, _ = self._marginal_decode(feats)
        tags = tags.data.numpy()

        loss = autograd.Variable(torch.Tensor([0.]))
        Q = nn.Sigmoid()
        for tag, log_p in zip(tags, score):
            Pw = log_p[tag]
            if tag == 0:
                not_tag = log_p[1:]
            elif tag == len(log_p) - 1:
                not_tag = log_p[:tag]
            else:
                not_tag = torch.cat((log_p[:tag], log_p[tag+1:]))
            maxPw = torch.max(not_tag)
            loss = loss - Q(Pw - maxPw)
        return loss
Project: pytorch-cns    Author: awentzonline    | project source | file source
def __init__(self, ngpu):
        super(NetD, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            #nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )
Project: age    Author: ly015    | project source | file source
def __init__(self, opts):

        super(Discriminator, self).__init__()

        cnn_feat_map = {'resnet18': 512, 'resnet50': 2048, 'vgg16': 2048}
        self.cnn_feat_size = cnn_feat_map[opts.cnn]

        hidden_lst = [self.cnn_feat_size] + opts.D_hidden + [1]
        layers = OrderedDict()
        if opts.input_relu == 1:
            layers['relu'] = nn.ReLU()

        for n, (dim_in, dim_out) in enumerate(zip(hidden_lst, hidden_lst[1::])):
            layers['fc%d' % n] = nn.Linear(dim_in, dim_out, bias = False)
            if n < len(hidden_lst) - 2:
                layers['bn%d' % n] = nn.BatchNorm1d(dim_out)
                layers['leaky_relu%d' % n] = nn.LeakyReLU(0.2)
        layers['sigmoid'] = nn.Sigmoid()

        self.net = nn.Sequential(layers)
Project: age    Author: ly015    | project source | file source
def __init__(self, opts, fix_decoder = True):
        super(MD_Discriminator, self).__init__()

        self.decoder = decoder_model.DecoderModel(fn = 'models/%s/best.pth' % opts.decoder_id)

        self.discriminator = nn.Sequential(
            nn.Conv2d(6, 64, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 256, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(256),
            nn.Conv2d(256, 512, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(512),
            nn.Conv2d(512, 1, 7, 1, 0, bias = False),
            nn.Sigmoid()
            )

        self.is_decoder_fixed = fix_decoder
Project: age    Author: ly015    | project source | file source
def __init__(self, opts, fix_decoder = True):
        super(D_Discriminator, self).__init__()

        self.decoder = decoder_model.DecoderModel(fn = 'models/%s/best.pth' % opts.decoder_id)

        self.discriminator = nn.Sequential(
            nn.Conv2d(3, 64, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 256, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(256),
            nn.Conv2d(256, 512, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(512),
            nn.Conv2d(512, 1, 7, 1, 0, bias = False),
            nn.Sigmoid()
        )

        self.decoder.eval()
        self.is_decoder_fixed = fix_decoder
Project: pix2pix    Author: leVirve    | project source | file source
def __init__(self, input_nc, output_nc, ndf):
        """ Discriminator model

        Args:
            input_nc: input image dimension
            output_nc: output image dimension
            ndf: the number of filters
        """
        super().__init__()
        std_layer = encoder_layer
        self.model = nn.Sequential(
            std_layer(input_nc + output_nc, ndf, activation=False, batchnorm=False),
            std_layer(ndf, ndf * 2),
            std_layer(ndf * 2, ndf * 4),
            std_layer(ndf * 4, ndf * 8, stride=1),
            std_layer(ndf * 8, 1, stride=1),
            nn.Sigmoid()
        )
        self.apply(weights_init)
Project: future-price-predictor    Author: htfy96    | project source | file source
def __init__(self, batch_size, rnn_len=5, hidden_state=64, feature_num=29, var_hidden=None, dropout=False):
        super(RNNModel, self).__init__()
        self.n_layer = rnn_len
        self.nhid = hidden_state

        self.l0 = nn.Linear(feature_num, feature_num)

        # self.d1 = nn.Dropout(p=0.2)
        if var_hidden is None:
            self.rnn = nn.LSTM(input_size=feature_num, hidden_size=hidden_state, num_layers=rnn_len, batch_first=True)
            rnn_output_size = hidden_state
        else:
            self.hidden_arr = var_hidden
            for i, state_num in enumerate(var_hidden):
                assert (rnn_len == len(var_hidden))
                last_size = var_hidden[i - 1] if i > 0 else feature_num
                setattr(self, 'rnn_{}'.format(i),
                        nn.LSTM(input_size=last_size, hidden_size=state_num, num_layers=1, batch_first=True))
                rnn_output_size = var_hidden[-1]
        # (N * 500 * 128)
        # (N * 128)
        # self.l1 = nn.Linear(hidden_state, hidden_state)
        # self.a1 = nn.Sigmoid()
        # (N * 128)

        # (N * 128)
        self._dropout = dropout
        if dropout:
            self.do = nn.Dropout(p=0.2)

        self.l2 = nn.Linear(rnn_output_size, 2)
        # (N * 2)
        self.softmax = nn.Softmax()

        # (100, 128)
        self.init_weights()
Project: DistanceGAN    Author: sagiebenaim    | project source | file source
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids

        kw = 4
        padw = int(np.ceil((kw-1)/2))
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]

        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                                kernel_size=kw, stride=2, padding=padw),
                # TODO: use InstanceNorm
                norm_layer(ndf * nf_mult, affine=True),
                nn.LeakyReLU(0.2, True)
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                            kernel_size=kw, stride=1, padding=padw),
            # TODO: use InstanceNorm
            norm_layer(ndf * nf_mult, affine=True),
            nn.LeakyReLU(0.2, True)
        ]

        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]

        if use_sigmoid:
            sequence += [nn.Sigmoid()]

        self.model = nn.Sequential(*sequence)
Project: postfilt_gan    Author: bajibabu    | project source | file source
def __init__(self):
        super(_netD, self).__init__()

        # Convolutional block
        self.conv1 = nn.Sequential(
            # input shape batch_size x 1 (number of channels) x 40 (mgc dim) x 40 (time)
            nn.Conv2d(1, 64, 5, stride=2, bias=True),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2, inplace=True),

            # shape [batch_size x 64 x 18 x 18]
            nn.Conv2d(64, 128, 5, stride=2, bias=True),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True),

            # shape [batch_size x 128 x 7 x 7]
            nn.Conv2d(128, 256, 5, stride=2, bias=True),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True),

            # shape [batch_size x 256 x 3 x 3]
            nn.Conv2d(256, 128, 3, stride=2, bias=True),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True)
        )
        # after flatten [batch_size x 128 * 1 * 1]
        # Dense block
        self.fc1 = nn.Sequential(
            nn.Linear(128, 1),
            nn.Sigmoid()
        )
        # final output shape [batch_size x 1]
Project: DeepIllumination    Author: CreativeCodingLab    | project source | file source
def __init__(self, n_channel_input, n_channel_output, n_filters):
        super(D, self).__init__()
        self.conv1 = nn.Conv2d(n_channel_input + n_channel_output, n_filters, 4, 2, 1)
        self.conv2 = nn.Conv2d(n_filters, n_filters * 2, 4, 2, 1)
        self.conv3 = nn.Conv2d(n_filters * 2, n_filters * 4, 4, 2, 1)
        self.conv4 = nn.Conv2d(n_filters * 4, n_filters * 8, 4, 1, 1)
        self.conv5 = nn.Conv2d(n_filters * 8, 1, 4, 1, 1)

        self.batch_norm2 = nn.BatchNorm2d(n_filters * 2)
        self.batch_norm4 = nn.BatchNorm2d(n_filters * 4)
        self.batch_norm8 = nn.BatchNorm2d(n_filters * 8)

        self.leaky_relu = nn.LeakyReLU(0.2, True)

        self.sigmoid = nn.Sigmoid()
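
Only the layers are declared here; a plausible forward wiring (reconstructed, not verbatim from the repository) chains conv, batch norm and LeakyReLU, ending in the sigmoid:

def forward(self, x):
    # x: input and target images concatenated along the channel axis
    h = self.leaky_relu(self.conv1(x))
    h = self.leaky_relu(self.batch_norm2(self.conv2(h)))
    h = self.leaky_relu(self.batch_norm4(self.conv3(h)))
    h = self.leaky_relu(self.batch_norm8(self.conv4(h)))
    return self.sigmoid(self.conv5(h))   # PatchGAN-style probability map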
Project: pyro    Author: uber    | project source | file source
def __init__(self, z_dim, hidden_dim):
        super(Decoder, self).__init__()
        # setup the three linear transformations used
        self.fc1 = nn.Linear(z_dim, hidden_dim)
        self.fc21 = nn.Linear(hidden_dim, 784)
        # setup the non-linearity
        self.softplus = nn.Softplus()
        self.sigmoid = nn.Sigmoid()
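
The corresponding forward pass, a reconstruction from the layer names above, decodes a latent z into Bernoulli pixel probabilities for a flattened 28x28 image:

def forward(self, z):
    hidden = self.softplus(self.fc1(z))         # (N, hidden_dim)
    loc_img = self.sigmoid(self.fc21(hidden))   # (N, 784), each pixel in (0, 1)
    return loc_img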
Project: pyro    Author: uber    | project source | file source
def __init__(self):
        super(Decoder, self).__init__()
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()
Project: pyro    Author: uber    | project source | file source
def __init__(self, input_dim, z_dim, emission_dim):
        super(Emitter, self).__init__()
        # initialize the three linear transformations used in the neural network
        self.lin_z_to_hidden = nn.Linear(z_dim, emission_dim)
        self.lin_hidden_to_hidden = nn.Linear(emission_dim, emission_dim)
        self.lin_hidden_to_input = nn.Linear(emission_dim, input_dim)
        # initialize the two non-linearities used in the neural network
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
Project: pyro    Author: uber    | project source | file source
def __init__(self, input_dim, hidden_dim, sigmoid_bias=2.0, permutation=None):
        super(InverseAutoregressiveFlow, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.arn = AutoRegressiveNN(input_dim, hidden_dim, output_dim_multiplier=2, permutation=permutation)
        self.sigmoid = nn.Sigmoid()
        self.sigmoid_bias = Variable(torch.Tensor([sigmoid_bias]))
        self._intermediates_cache = {}
        self.add_inverse_to_cache = True
Project: pytorch-tutorial    Author: yunjey    | project source | file source
def __init__(self, image_size=784, h_dim=400, z_dim=20):
        super(VAE, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(image_size, h_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(h_dim, z_dim*2))  # 2 for mean and variance.

        self.decoder = nn.Sequential(
            nn.Linear(z_dim, h_dim),
            nn.ReLU(),
            nn.Linear(h_dim, image_size),
            nn.Sigmoid())
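
The encoder emits z_dim*2 values (mean and log-variance side by side), so a reparameterisation step sits between encoder and decoder; the helper below is an assumed sketch, not part of the snippet:

import torch

def reparameterize(h):
    mu, log_var = torch.chunk(h, 2, dim=1)  # split the 2*z_dim encoder output
    std = torch.exp(log_var / 2)
    eps = torch.randn_like(std)
    return mu + eps * std                   # differentiable sample of z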
Project: FewShotLearning    Author: gitabcworld    | project source | file source
def forward(self, input_, grads_, hx):
        """
        Args:
            input_: A (batch, input_size) tensor containing input
                features.
            grads_: gradients with respect to the learner parameters,
                used to form the parameter update.
            hx: A tuple (fS, iS, cS, deltaS) containing the previous
                forget-gate, input-gate, cell/parameter state and update,
                each of size (batch, hidden_size).
        Returns:
            fS, iS, cS, deltaS: the next gate pre-activations,
                cell/parameter state and update.
        """

        # next forget, input gate
        (fS, iS, cS, deltaS) = hx
        fS = torch.cat((cS, fS), 1)
        iS = torch.cat((cS, iS), 1)

        fS = torch.mm(torch.cat((input_,fS), 1),self.WF)
        fS += self.bF.expand_as(fS)

        iS = torch.mm(torch.cat((input_,iS), 1),self.WI)
        iS += self.bI.expand_as(iS)

        # next delta
        deltaS = self.m * deltaS - nn.Sigmoid()(iS).mul(grads_)

        # next cell/params
        cS = nn.Sigmoid()(fS).mul(cS) + deltaS

        return fS, iS, cS, deltaS
Project: GlottGAN    Author: bajibabu    | project source | file source
def __init__(self, cond_input_size):
        super(_netD, self).__init__()
        self.cond_input_size = cond_input_size
        # Convolutional block
        self.conv1 = nn.Sequential(
            # input shape batch_size x 1 (number of channels) x 400 (length of pulse)
            nn.Conv1d(1, 100, 13, stride=5, padding=6, bias=True),
            nn.BatchNorm1d(100),
            nn.LeakyReLU(0.2, inplace=True),

            # shape [batch_size x 100 x 80]
            nn.Conv1d(100, 250, 13, stride=5, padding=6, bias=True),
            nn.BatchNorm1d(250),
            nn.LeakyReLU(0.2, inplace=True),

            # shape [batch_size x 250 x 16]
            nn.Conv1d(250, 300, 13, stride=4, padding=6, bias=True),
            nn.BatchNorm1d(300),
            nn.LeakyReLU(0.2, inplace=True)
            # shape [batch_size x 300 x 4]
        )
        # after flatten 300 * 4 + 47 (conditional input size)
        # Dense block
        self.fc1 = nn.Sequential(
            nn.Linear(1200 + self.cond_input_size, 200),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(200,1),
            nn.Sigmoid()
        )
Project: malmo-challenge    Author: Kaixhin    | project source | file source
def __init__(self, hidden_size):
    super(ActorCritic, self).__init__()
    self.state_size = STATE_SIZE[0] * STATE_SIZE[1] * STATE_SIZE[2]

    self.elu = nn.ELU(inplace=True)
    self.softmax = nn.Softmax()
    self.sigmoid = nn.Sigmoid()

    # Pass state into model body
    self.conv1 = nn.Conv2d(STATE_SIZE[0], 32, 4, stride=2)
    self.conv2 = nn.Conv2d(32, 32, 3)
    self.fc1 = nn.Linear(1152, hidden_size)
    # Pass previous action, reward and timestep directly into LSTM
    self.lstm = nn.LSTMCell(hidden_size + ACTION_SIZE + 2, hidden_size)
    self.fc_actor1 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_critic1 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_actor2 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_critic2 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_class = nn.Linear(hidden_size, 1)

    # Orthogonal weight initialisation
    for name, p in self.named_parameters():
      if 'weight' in name:
        init.orthogonal(p)
      elif 'bias' in name:
        init.constant(p, 0)
    # Set LSTM forget gate bias to 1
    for name, p in self.lstm.named_parameters():
      if 'bias' in name:
        n = p.size(0)
        forget_start_idx, forget_end_idx = n // 4, n // 2
        init.constant(p[forget_start_idx:forget_end_idx], 1)
Project: R-net    Author: matthew-z    | project source | file source
def __init__(self, question_embed_size, passage_embed_size, hidden_size,
                 attention_layer_factory, attn_args, attn_kwags, attn_mode="pair_encoding", num_layers=1,
                 dropout=0, bias=True, rnn_cell=nn.GRUCell, residual=False,
                 gated=True):
        input_size = question_embed_size + passage_embed_size
        super().__init__(input_size, hidden_size, num_layers,
                         dropout, bias, rnn_cell, residual)
        self.attention = attention_layer_factory(*attn_args, **attn_kwags)
        self.gated = gated
        self.attn_mode = attn_mode
        if gated:
            self.gate = nn.Sequential(
                nn.Linear(input_size, input_size, bias=False),
                nn.Sigmoid()
            )
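
In gated attention readers such as R-net, a gate like this typically rescales the concatenated [passage word; attended question context] vector elementwise before it enters the RNN cell; a sketch under those assumptions (not verbatim from the repository):

# inside the cell step, assuming `passage_word` and `context` are
# (batch, passage_embed_size) and (batch, question_embed_size):
inputs = torch.cat([passage_word, context], dim=-1)  # (batch, input_size)
if self.gated:
    g = self.gate(inputs)    # per-dimension weights in (0, 1)
    inputs = g * inputs      # gated input, as in the R-net paper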
Project: NeuralMT    Author: hlt-mt    | project source | file source
def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(ContextGate, self).__init__()
        input_size = embeddings_size + decoder_size + attention_size
        self.gate = nn.Linear(input_size, output_size, bias=True)
        self.sig = nn.Sigmoid()
        self.source_proj = nn.Linear(attention_size, output_size)
        self.target_proj = nn.Linear(embeddings_size + decoder_size,
                                     output_size)
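
A sketch of how a context gate of this shape is typically applied (reconstructed from the layer names; the project's actual forward may differ): the sigmoid output interpolates per dimension between the source (attention) projection and the target (embedding plus decoder state) projection:

def forward(self, prev_emb, dec_state, attn_state):
    z = self.sig(self.gate(torch.cat((prev_emb, dec_state, attn_state), dim=1)))
    source = self.source_proj(attn_state)
    target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1))
    return z * source + (1.0 - z) * target   # z in (0, 1) gates the mixture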
Project: action-detection    Author: yjxiong    | project source | file source
def __init__(self, ohem_ratio=0.17):
        super(CompletenessLoss, self).__init__()
        self.ohem_ratio = ohem_ratio

        self.sigmoid = nn.Sigmoid()
Project: pyprob    Author: probprog    | project source | file source
def forward(self, x, samples):
        x = self.drop(x)
        x = F.relu(self.lin1(x))
        x = self.drop(x)
        x = self.lin2(x)
        modes = x[:,0].unsqueeze(1)
        certainties = x[:,1].unsqueeze(1)
        modes = nn.Sigmoid()(modes)
        certainties = nn.Softplus()(certainties) * self.softplus_boost
        # To do: check if mins are < maxs, if not, raise warning and return success = false
        prior_mins = Variable(util.Tensor([s.distribution.prior_min for s in samples]), requires_grad=False)
        prior_maxs = Variable(util.Tensor([s.distribution.prior_max for s in samples]), requires_grad=False)
        return True, torch.cat([(modes * (prior_maxs - prior_mins) + prior_mins), certainties], 1)
Project: pyprob    Author: probprog    | project source | file source
def forward(self, x, samples):
        x = self.drop(x)
        x = F.relu(self.lin1(x))
        x = self.drop(x)
        x = self.lin2(x)
        modes = x[:,0].unsqueeze(1)
        certainties = x[:,1].unsqueeze(1)
        modes = nn.Sigmoid()(modes)
        certainties = nn.Softplus()(certainties) * self.softplus_boost
        return True, torch.cat([modes, certainties], 1)
Project: e2c-pytorch    Author: ethanluoyc    | project source | file source
def __init__(self, dim_in, dim_out):
        m = nn.Sequential(
            nn.Linear(dim_in, 200),
            nn.BatchNorm1d(200),
            nn.ReLU(),
            nn.Linear(200, 200),
            nn.BatchNorm1d(200),
            nn.ReLU(),
            nn.Linear(200, dim_out),
            nn.BatchNorm1d(dim_out),
            nn.Sigmoid()
        )
        super(PlaneDecoder, self).__init__(m, dim_in, dim_out)
Project: e2c-pytorch    Author: ethanluoyc    | project source | file source
def __init__(self, dim_in, dim_out):
        m = nn.ModuleList([
            torch.nn.Linear(dim_in, 800),
            nn.BatchNorm1d(800),
            nn.ReLU(),
            torch.nn.Linear(800, 800),
            nn.BatchNorm1d(800),
            nn.ReLU(),
            nn.Linear(800, dim_out),
            nn.Sigmoid()
        ])
        super(PendulumDecoder, self).__init__(m, dim_in, dim_out)