Python torch.nn 模块,LeakyReLU() 实例源码

我们从 Python 开源项目中,提取了以下 50 个代码示例,用于说明如何使用 torch.nn.LeakyReLU()。

项目:DistanceGAN    作者:sagiebenaim    | 项目源码 | 文件源码
def __init__(self):
        """DCGAN-style discriminator stem: five conv layers, registered as
        individual attributes (conv1..conv5) for use in forward()."""
        super(Discriminator, self).__init__()
        base = 64
        # Input block has no batch norm (DCGAN convention).
        self.conv1 = nn.Conv2d(3, base, 4, 2, 1, bias=False)
        self.relu1 = nn.LeakyReLU(0.2, inplace=True)
        # Stages 2-4: each stride-2 conv halves spatial size and doubles channels.
        for idx, mult in ((2, 2), (3, 4), (4, 8)):
            setattr(self, 'conv%d' % idx,
                    nn.Conv2d(base * (mult // 2), base * mult, 4, 2, 1, bias=False))
            setattr(self, 'bn%d' % idx, nn.BatchNorm2d(base * mult))
            setattr(self, 'relu%d' % idx, nn.LeakyReLU(0.2, inplace=True))
        # Final 4x4 conv collapses the (512 x 4 x 4) map to a single logit.
        self.conv5 = nn.Conv2d(base * 8, 1, 4, 1, 0, bias=False)
项目:GAN-Zoo    作者:corenel    | 项目源码 | 文件源码
def __init__(self, num_channels, conv_dim, num_gpu):
        """Init for Discriminator model: DCGAN conv stack ending in a sigmoid."""
        super(Discriminator, self).__init__()
        self.num_gpu = num_gpu
        modules = [
            # 1st conv layer: num_channels x 64 x 64 -> conv_dim x 32 x 32
            # (no batch norm on the input block).
            nn.Conv2d(num_channels, conv_dim, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Stages 2-4: halve spatial size, double channel count each time.
        for mult in (1, 2, 4):
            modules += [
                nn.Conv2d(conv_dim * mult, conv_dim * mult * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(conv_dim * mult * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # Output layer: (conv_dim*8) x 4 x 4 -> 1 x 1 x 1 probability.
        modules += [
            nn.Conv2d(conv_dim * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.layer = nn.Sequential(*modules)
项目:lr-gan.pytorch    作者:jwyang    | 项目源码 | 文件源码
def buildNet(self, nsize, in_channels=None, base_depth=None):
        """Build a DCGAN-style discriminator for square inputs of size nsize.

        Args:
            nsize: input spatial size; repeatedly halved until it reaches 4.
            in_channels: input channel count; defaults to the module-level nc.
            base_depth: width of the first conv layer; defaults to the
                module-level ndf. Both defaults keep the original call
                signature working unchanged.

        Returns:
            nn.Sequential ending in a 4x4 conv to one channel plus a Sigmoid.
        """
        net = nn.Sequential()
        depth_in = nc if in_channels is None else in_channels
        depth_out = ndf if base_depth is None else base_depth
        size_map = nsize
        while size_map > 4:
            name = str(size_map)
            net.add_module('conv' + name, nn.Conv2d(depth_in, depth_out, 4, 2, 1, bias=False))
            # No batch norm on the very first (input-size) block.
            if size_map < nsize:
                net.add_module('bn' + name, nn.BatchNorm2d(depth_out))
            net.add_module('lrelu' + name, nn.LeakyReLU(0.2, inplace=True))
            depth_in = depth_out
            depth_out = 2 * depth_in
            # BUG FIX: use integer division. Under Python 3, `size_map / 2`
            # yields a float, so module names become e.g. "32.0" and
            # add_module rejects names containing ".".
            size_map = size_map // 2
        name = str(size_map)
        net.add_module('conv' + name, nn.Conv2d(depth_in, 1, 4, 1, 0, bias=False))
        net.add_module('sigmoid' + name, nn.Sigmoid())
        return net
项目:Deep-learning-with-cats    作者:AlexiaJM    | 项目源码 | 文件源码
def __init__(self):
        """CycleGAN discriminator: four conv blocks plus a small output conv."""
        super(CycleGAN_D, self).__init__()

        h = param.D_h_size
        # Input block (n_colors x image_size x image_size); no normalisation.
        blocks = [nn.Conv2d(param.n_colors, h, kernel_size=4, stride=2, padding=2),
                  nn.LeakyReLU(0.2, inplace=True)]
        # Middle blocks double the channel count; the last keeps spatial size
        # (stride 1). All use kernel 4, padding 2 as in the original.
        for m_in, m_out, stride in ((1, 2, 2), (2, 4, 2), (4, 8, 1)):
            blocks += [nn.Conv2d(h * m_in, h * m_out, kernel_size=4, stride=stride, padding=2),
                       Norm2D(h * m_out),
                       nn.LeakyReLU(0.2, inplace=True)]
        # Output: (D_h_size*8) feature map -> 1-channel score map.
        blocks.append(nn.Conv2d(h * 8, 1, kernel_size=2, stride=1, padding=2))
        self.model = nn.Sequential(*blocks)
项目:StackGAN_pytorch    作者:qizhex    | 项目源码 | 文件源码
def __init__(self, high_res_model, lr_imsize, c_size):
        """StackGAN discriminator: image encoder + conditioning projection,
        combined by 1x1 and full-size convolutions."""
        super(discriminator, self).__init__()
        self.df_dim = cfg.GAN.DF_DIM
        self.ef_dim = cfg.GAN.EMBEDDING_DIM
        # Project the conditioning vector into the embedding space.
        self.d_context_template = nn.Sequential(
            nn.Linear(c_size, self.ef_dim),
            nn.LeakyReLU(negative_slope=0.2),
        )
        # Spatial sizes after each stride-2 halving of the input resolution.
        self.s = lr_imsize
        self.s2 = int(self.s / 2)
        self.s4 = int(self.s / 4)
        self.s8 = int(self.s / 8)
        self.s16 = int(self.s / 16)
        # Channels occupy dim 1 (dim 0 is the mini-batch).
        encoder = hr_d_image_encoder if high_res_model else d_image_encoder
        self.d_image_template = encoder(self.s)
        # Fuse image features with the broadcast context embedding, then
        # reduce the s16 x s16 map to a single score.
        self.discriminator_combine = nn.Sequential(
            custom_con2d((self.s16, self.s16), self.df_dim * 8 + self.ef_dim, self.df_dim * 8, (1, 1), (1, 1)),
            nn.BatchNorm2d(self.df_dim * 8),
            nn.LeakyReLU(negative_slope=0.2),
            custom_con2d((self.s16, self.s16), self.df_dim * 8, 1, (self.s16, self.s16), (self.s16, self.s16)),
        )
项目:generative_zoo    作者:DL-IT    | 项目源码 | 文件源码
def __init__(self, n_z, n_hidden, depth, ngpu):
        """MLP discriminator over the n_z-dim latent code, as instructed in the
        paper: (depth-1) linear/batchnorm/LeakyReLU blocks plus a sigmoid head."""
        super(Code_Discriminator, self).__init__()

        self.n_z = n_z
        self.ngpu = ngpu
        net = nn.Sequential()

        # First block lifts the latent vector to n_hidden units.
        net.add_module('linear_{0}-{1}-{2}'.format(1, n_z, n_hidden), nn.Linear(n_z, n_hidden))
        net.add_module('batchnorm_{0}-{1}'.format(1, n_hidden), nn.BatchNorm1d(n_hidden))
        net.add_module('LeakyReLU_{0}'.format(1), nn.LeakyReLU(0.2, inplace=True))

        # Hidden blocks 2..depth-1 keep the width at n_hidden.
        last = 1
        for idx in range(2, depth):
            net.add_module('linear_{0}-{1}-{2}'.format(idx, n_hidden, n_hidden), nn.Linear(n_hidden, n_hidden))
            net.add_module('batchnorm_{0}-{1}'.format(idx, n_hidden), nn.BatchNorm1d(n_hidden))
            net.add_module('LeakyReLU_{0}'.format(idx), nn.LeakyReLU(0.2, inplace=True))
            last = idx

        # Output block: single probability via sigmoid.
        out_idx = last + 1
        net.add_module('linear_{0}-{1}-{2}'.format(out_idx, n_hidden, 1), nn.Linear(n_hidden, 1))
        net.add_module('Sigmoid_{0}'.format(out_idx), nn.Sigmoid())

        self.code_dis = net
项目:generative_zoo    作者:DL-IT    | 项目源码 | 文件源码
def make_conv_layer(layer_list, in_dim, out_dim, back_conv, batch_norm=True, activation='ReLU', k_s_p=(4, 2, 1)):
    """Append one conv block (conv [+ batchnorm] [+ activation]) to layer_list.

    Args:
        layer_list: list of nn.Module to append to (mutated in place).
        in_dim / out_dim: channel counts for the convolution.
        back_conv: if truthy, use ConvTranspose2d instead of Conv2d.
        batch_norm: append a BatchNorm2d after the convolution.
        activation: one of 'ReLU', 'Sigmoid', 'Tanh', 'LeakyReLU'; anything
            else appends no activation (original behavior preserved).
        k_s_p: (kernel_size, stride, padding). Default changed from a mutable
            list to a tuple; it is only indexed, so callers are unaffected.

    Returns:
        The same layer_list, for chaining.
    """
    k, s, p = k_s_p[0], k_s_p[1], k_s_p[2]
    # Idiom fix: select the conv class instead of comparing `== True/False`.
    conv_cls = nn.ConvTranspose2d if back_conv else nn.Conv2d
    layer_list.append(conv_cls(in_dim, out_dim, kernel_size=k, stride=s, padding=p, bias=False))

    if batch_norm:
        layer_list.append(nn.BatchNorm2d(out_dim))

    if activation == 'ReLU':
        layer_list.append(nn.ReLU(True))
    elif activation == 'Sigmoid':
        layer_list.append(nn.Sigmoid())
    elif activation == 'Tanh':
        layer_list.append(nn.Tanh())
    elif activation == 'LeakyReLU':
        layer_list.append(nn.LeakyReLU(0.2, inplace=True))

    return layer_list
项目:pytorch-CycleGAN-and-pix2pix    作者:junyanz    | 项目源码 | 文件源码
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        """1x1 PatchGAN discriminator: classifies every pixel independently."""
        super(PixelDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        # InstanceNorm carries no affine bias by default, so the convs that
        # feed it need their own bias term.
        norm_fn = norm_layer.func if type(norm_layer) == functools.partial else norm_layer
        use_bias = norm_fn == nn.InstanceNorm2d

        modules = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias),
        ]
        if use_sigmoid:
            modules.append(nn.Sigmoid())

        self.net = nn.Sequential(*modules)
项目:examples    作者:pytorch    | 项目源码 | 文件源码
def __init__(self, ngpu):
        """DCGAN discriminator: (nc) x 64 x 64 image -> real/fake probability."""
        super(_netD, self).__init__()
        self.ngpu = ngpu
        stages = [
            # (nc) x 64 x 64 -> (ndf) x 32 x 32; no norm on the input block.
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Three stride-2 stages: halve spatial size, double channels each time.
        for m in (1, 2, 4):
            stages += [
                nn.Conv2d(ndf * m, ndf * m * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * m * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # (ndf*8) x 4 x 4 -> 1 x 1 x 1, squashed to a probability.
        stages += [
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*stages)
项目:alphaGAN    作者:yjchoe    | 项目源码 | 文件源码
def __init__(self, input_size=784, num_channels=1, code_size=50, gpu=True):
        """Convolutional encoder mapping an image to a code_size-dim vector."""
        super(Encoder, self).__init__()
        self.gpu = gpu
        assert input_size % 16 == 0, "input_size has to be a multiple of 16"

        # Two conv/batchnorm/LeakyReLU blocks: (out_ch, kernel, stride) per stage.
        conv = nn.Sequential()
        in_ch = num_channels
        for out_ch, kernel, stride in ((32, 6, 1), (64, 5, 2)):
            conv.add_module('pyramid.{0}-{1}.conv'.format(in_ch, out_ch),
                            nn.Conv2d(in_ch, out_ch, kernel, stride, bias=False))
            conv.add_module('pyramid.{0}.batchnorm'.format(out_ch),
                            nn.BatchNorm2d(out_ch))
            conv.add_module('pyramid.{0}.relu'.format(out_ch),
                            nn.LeakyReLU(0.2, inplace=True))
            in_ch = out_ch
        self.conv = conv

        # Final linear module over the flattened 64 x 10 x 10 feature map.
        self.fc = nn.Linear(64 * 10 * 10, code_size)
项目:pytorch-reverse-gan    作者:yxlao    | 项目源码 | 文件源码
def __init__(self, ngpu, ndf, nc):
        """DCGAN discriminator; shape comments assume batch size 64."""
        super(NetD, self).__init__()
        self.ngpu = ngpu
        stages = [
            # (nc) x 64 x 64 input: [64, 3, 64, 64] -> [64, ndf, 32, 32]
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Stride-2 stages: ndf -> 2ndf -> 4ndf -> 8ndf while halving H and W.
        for m in (1, 2, 4):
            stages += [
                nn.Conv2d(ndf * m, ndf * m * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * m * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # (ndf*8) x 4 x 4 -> [64, 1, 1, 1] probability.
        stages += [
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*stages)
项目:keita    作者:iwasaki-kenta    | 项目源码 | 文件源码
def __init__(self, feature_size=64):
        """Four conv blocks (conv-bn-LeakyReLU-maxpool) plus a linear head.

        Args:
            feature_size: channel width of every block and size of the output
                embedding. Default 64 preserves the original architecture.
        """
        super(OmniglotEncoder, self).__init__()
        self.layers = nn.ModuleList()

        # First block lifts the 3-channel input to feature_size channels.
        first_block = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=feature_size, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(feature_size),
            nn.LeakyReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2)
        )
        self.layers.append(first_block)

        # BUG FIX: inner blocks previously hard-coded in_channels=64, which
        # broke any feature_size != 64 at runtime; they must consume the
        # previous block's feature_size-channel output.
        for layer_index in range(3):
            block = nn.Sequential(
                nn.Conv2d(in_channels=feature_size, out_channels=feature_size, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(feature_size),
                nn.LeakyReLU(inplace=True),
                nn.MaxPool2d(kernel_size=2)
            )
            self.layers.append(block)

        # Assumes pooling has reduced the map to 1x1 per channel -- TODO confirm
        # against the expected input resolution.
        self.fc = nn.Linear(feature_size, feature_size)
项目:pix2pix-pytorch    作者:1zb    | 项目源码 | 文件源码
def __init__(self, input_nc, target_nc, ndf):
        """pix2pix discriminator over a channel-concatenated (input, target) pair."""
        super(_netD, self).__init__()
        stages = [
            # (input_nc + target_nc) x 64 x 64 -> (ndf) x 32 x 32
            nn.Conv2d(input_nc + target_nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Middle stages; the last one uses stride 1 so spatial size is kept.
        for m, stride in ((1, 2), (2, 2), (4, 1)):
            stages += [
                nn.Conv2d(ndf * m, ndf * m * 2, 4, stride, 1, bias=False),
                nn.BatchNorm2d(ndf * m * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # Patch scores squashed to probabilities.
        stages += [
            nn.Conv2d(ndf * 8, 1, 4, 1, 1, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*stages)
项目:MachineLearning    作者:timomernick    | 项目源码 | 文件源码
def __init__(self):
        """DCGAN discriminator with its own Adam optimizer attached."""
        super(Discriminator, self).__init__()
        stages = [
            # No norm on the input block.
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Three stride-2 stages doubling the channel count.
        for m in (1, 2, 4):
            stages += [
                nn.Conv2d(ndf * m, ndf * m * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * m * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        stages += [
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*stages)
        # Custom weight init, then an optimizer over this module's parameters.
        self.apply(weights_init)
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, betas=(beta_1, beta_2))
项目:MachineLearning    作者:timomernick    | 项目源码 | 文件源码
def __init__(self):
        """1-D conv discriminator: four stride-2 Conv1d stages plus a linear head."""
        super(Discriminator, self).__init__()

        # Four stride-2 1-D convolutions; channels double after the first.
        self.conv0 = nn.Conv1d(nc, ndf, 4, 2, 1, bias=False)
        self.conv1 = nn.Conv1d(ndf, ndf * 2, 4, 2, 1, bias=False)
        self.conv2 = nn.Conv1d(ndf * 2, ndf * 4, 4, 2, 1, bias=False)
        self.conv3 = nn.Conv1d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)

        # Batch norm for stages 1-3 (stage 0 is deliberately unnormalised).
        self.bn1 = nn.BatchNorm1d(ndf * 2)
        self.bn2 = nn.BatchNorm1d(ndf * 4)
        self.bn3 = nn.BatchNorm1d(ndf * 8)

        # Shared activations reused by every stage in forward().
        self.relu = nn.LeakyReLU(0.2, inplace=True)
        self.sigmoid = nn.Sigmoid()

        # Flattened conv features (512 * 128) -> 100-dim output.
        self.fc0_size = 512 * 128
        self.fc0 = nn.Linear(self.fc0_size, 100)

        self.apply(weights_init)

        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, betas=(beta_1, beta_2))
项目:FaderNetworks    作者:facebookresearch    | 项目源码 | 文件源码
def __init__(self, params):
        """Attribute classifier: encoder-style conv stack plus a 2-layer MLP head.

        Args:
            params: config namespace providing img_sz, img_fm, init_fm, max_fm,
                hid_dim, attr and n_attr.
        """
        super(Classifier, self).__init__()

        self.img_sz = params.img_sz
        self.img_fm = params.img_fm
        self.init_fm = params.init_fm
        self.max_fm = params.max_fm
        self.hid_dim = params.hid_dim
        self.attr = params.attr
        self.n_attr = params.n_attr

        # One stride-2 layer per halving of img_sz down to 1; int(np.log2(...))
        # assumes img_sz is a power of two -- TODO confirm.
        self.n_clf_layers = int(np.log2(self.img_sz))
        # Feature maps double per layer, capped at max_fm.
        self.conv_out_fm = min(self.init_fm * (2 ** (self.n_clf_layers - 1)), self.max_fm)

        # classifier layers are identical to encoder, but convolve until size 1
        enc_layers, _ = build_layers(self.img_sz, self.img_fm, self.init_fm, self.max_fm,
                                     self.n_clf_layers, self.n_attr, 0, 'convtranspose',
                                     False, 0, 0)

        self.conv_layers = nn.Sequential(*enc_layers)
        # Projection head: conv_out_fm features -> hid_dim -> one score per attribute.
        self.proj_layers = nn.Sequential(
            nn.Linear(self.conv_out_fm, self.hid_dim),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(self.hid_dim, self.n_attr)
        )
项目:pytorchnet    作者:human-analysis    | 项目源码 | 文件源码
def __init__(self, nfilters, nstack, noutputs, nmodules=1):
        """Stacked-hourglass network front-end.

        Args:
            nfilters: channel width of the stem and residual blocks.
            nstack: number of stacked hourglass modules.
            noutputs: output channels (e.g. number of heatmaps).
            nmodules: residual modules per hourglass location.
        """
        super(HourGlassSingle, self).__init__()

        self.nstack = nstack
        self.nfilters = nfilters
        self.noutputs = noutputs
        self.nmodules = nmodules

        # Stem: 7x7 stride-2 conv halves the input resolution, then BN + LeakyReLU.
        self.conv1 = nn.Conv2d(3,self.nfilters,kernel_size=7,stride=2,padding=3)
        self.bn1 = nn.BatchNorm2d(self.nfilters)
        # inplace=False: the activation does not overwrite its input tensor.
        self.relu = nn.LeakyReLU(0.2, inplace=False)
        self.r1 = Residual(self.nfilters,self.nfilters)
        self.pool = nn.MaxPool2d(2, stride=2)
        self.r2 = Residual(self.nfilters,self.nfilters)
        self.r3 = Residual(self.nfilters,self.noutputs)
        self.stack_hg = self.make_layers_()
        self.last_hg = HourGlassLast(self.nfilters, self.noutputs, self.nmodules)

        # Presumably consumed by a feature-extraction hook -- confirm in caller.
        self.extracted_layers = ['HourGlassStack']
项目:food-GAN    作者:rtlee9    | 项目源码 | 文件源码
def __init__(self, ngpu):
        """DCGAN discriminator that emits a raw score (no final sigmoid)."""
        super(_netD, self).__init__()
        self.ngpu = ngpu
        stages = [
            # (nc) x 64 x 64 -> (ndf) x 32 x 32; input block has no norm.
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Stride-2 stages: halve spatial size, double channels.
        for m in (1, 2, 4):
            stages += [
                nn.Conv2d(ndf * m, ndf * m * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * m * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # (ndf*8) x 4 x 4 -> 1 x 1 x 1 unbounded score.
        stages.append(nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False))
        self.main = nn.Sequential(*stages)
项目:food-GAN    作者:rtlee9    | 项目源码 | 文件源码
def __init__(self, ngpu):
        """DCGAN discriminator: (nc) x 64 x 64 -> probability via sigmoid."""
        super(_netD, self).__init__()
        self.ngpu = ngpu
        parts = [
            # Input block: (nc) x 64 x 64 -> (ndf) x 32 x 32, no batch norm.
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Channel-doubling stride-2 stages down to a 4x4 map.
        for mult in (1, 2, 4):
            parts += [
                nn.Conv2d(ndf * mult, ndf * mult * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * mult * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # Collapse (ndf*8) x 4 x 4 to a single sigmoid probability.
        parts += [
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*parts)
项目:DisentangleVAE    作者:Jueast    | 项目源码 | 文件源码
def __init__(self, input, output, zdim, batchnorm, activacation):
        """Conv encoder stage (stub -- see the trailing 'Not implemented' notice).

        Args:
            input / output: channel counts for the stride-2 convolution.
            zdim: latent size (unused in this stub).
            batchnorm: whether to insert a BatchNorm2d after the conv.
            activacation: 'lrelu' for LeakyReLU, anything else for ReLU.
        """
        super(CNNEncodeLayer, self).__init__()
        if activacation == "lrelu":
            self.act = nn.LeakyReLU()
        else:
            self.act = nn.ReLU()
        # BUG FIX: nn.Conv2d takes 'kernel_size', not 'kernel'; the original
        # keyword raised a TypeError on construction.
        if batchnorm:
            main = nn.Sequential(
                nn.Conv2d(input, output, kernel_size=4, stride=2, padding=1),
                nn.BatchNorm2d(output),
                self.act,
            )
        else:
            main = nn.Sequential(
                nn.Conv2d(input, output, kernel_size=4, stride=2, padding=1),
                self.act,
            )
        # NOTE(review): 'main' is built but never registered on self, so its
        # parameters are not tracked -- presumably meant to be self.main once
        # this layer is implemented; left as-is to preserve behavior.
        self.conv = nn.Conv2d(output, 1, kernel_size=1, stride=1, padding=0)
        print ("Not implemented now...")
        return
项目:DisentangleVAE    作者:Jueast    | 项目源码 | 文件源码
def __init__(self, input, output, zdim, batchnorm, activacation):
        """Fully-connected encoder stage with two zdim-sized output heads."""
        super(EncodeLayer, self).__init__()
        # Shared non-linearity instance, also placed inside the Sequential.
        self.act = nn.LeakyReLU() if activacation == "lrelu" else nn.ReLU()

        pieces = [nn.Linear(input, output)]
        if batchnorm:
            pieces.append(nn.BatchNorm1d(output))
        pieces.append(self.act)
        self.main = nn.Sequential(*pieces)

        # Two heads of size zdim (presumably mean and log-variance of a VAE
        # posterior -- confirm against the caller).
        self.fc1 = nn.Linear(output, zdim)
        self.fc2 = nn.Linear(output, zdim)
项目:DisentangleVAE    作者:Jueast    | 项目源码 | 文件源码
def __init__(self, input, output, zdim, batchnorm, activacation):
        """Fully-connected decoder stage fed by a z-projection."""
        super(DecodeLayer, self).__init__()

        self.act = nn.LeakyReLU() if activacation == "lrelu" else nn.ReLU()

        # input == 0 marks the bottom layer, which receives only the projected z;
        # other layers concatenate the projection with the previous features,
        # doubling the fan-in of the main block.
        if input == 0:
            input = output
            self.fc = nn.Linear(zdim, input)
        else:
            self.fc = nn.Linear(zdim, input)
            input *= 2

        pieces = [nn.Linear(input, output)]
        if batchnorm:
            pieces.append(nn.BatchNorm1d(output))
        pieces.append(self.act)
        self.main = nn.Sequential(*pieces)
项目:DisentangleVAE    作者:Jueast    | 项目源码 | 文件源码
def __init__(self, input, output, zdim, batchnorm, activacation):
        """Conv encoder stage (stub -- prints 'Not implemented now...').

        Args:
            input / output: channel counts for the stride-2 convolution.
            zdim: latent size (unused in this stub).
            batchnorm: whether to insert a BatchNorm2d after the conv.
            activacation: 'lrelu' for LeakyReLU, anything else for ReLU.
        """
        super(CNNEncodeLayer, self).__init__()
        if activacation == "lrelu":
            self.act = nn.LeakyReLU()
        else:
            self.act = nn.ReLU()
        # BUG FIX: the Conv2d keyword is 'kernel_size', not 'kernel'; the
        # original raised a TypeError the moment this layer was constructed.
        if batchnorm:
            main = nn.Sequential(
                nn.Conv2d(input, output, kernel_size=4, stride=2, padding=1),
                nn.BatchNorm2d(output),
                self.act,
            )
        else:
            main = nn.Sequential(
                nn.Conv2d(input, output, kernel_size=4, stride=2, padding=1),
                self.act,
            )
        # NOTE(review): 'main' is never stored on self (parameters untracked);
        # likely intended to be self.main -- left unchanged to preserve behavior.
        self.conv = nn.Conv2d(output, 1, kernel_size=1, stride=1, padding=0)
        print ("Not implemented now...")
        return
项目:DisentangleVAE    作者:Jueast    | 项目源码 | 文件源码
def __init__(self, input, output, zdim, batchnorm, activacation):
        """Fully-connected decoder stage; identical in structure to DecodeLayer above."""
        super(DecodeLayer, self).__init__()

        self.act = nn.LeakyReLU() if activacation == "lrelu" else nn.ReLU()

        # Bottom layer (input == 0) takes only the z-projection; any other
        # layer concatenates it with the previous features (fan-in doubles).
        if input == 0:
            input = output
            self.fc = nn.Linear(zdim, input)
        else:
            self.fc = nn.Linear(zdim, input)
            input *= 2

        stack = [nn.Linear(input, output)]
        if batchnorm:
            stack.append(nn.BatchNorm1d(output))
        stack.append(self.act)
        self.main = nn.Sequential(*stack)
项目:DisentangleVAE    作者:Jueast    | 项目源码 | 文件源码
def __init__(self, input_dims, hidden=400, activacation="lrelu", batchnorm=False):
        """MLP discriminator over a flattened input: three hidden layers -> one score.

        Note: the batchnorm flag is accepted but unused (original behavior kept).
        """
        super(Discriminator, self).__init__()
        self.act = nn.LeakyReLU() if activacation == "lrelu" else nn.ReLU()

        # Flattened input size.
        self.nx = int(np.prod(input_dims))
        widths = [self.nx, hidden, hidden, hidden]
        stack = []
        # Same activation instance after each hidden linear layer.
        for w_in, w_out in zip(widths, widths[1:]):
            stack += [nn.Linear(w_in, w_out), self.act]
        stack.append(nn.Linear(hidden, 1))
        self.main = nn.Sequential(*stack)
项目:Machine-Learning    作者:hadikazemi    | 项目源码 | 文件源码
def __init__(self):
        """Conditional generator: (z, 10-d label) -> 4x4x256 -> RGB image."""
        super(Generator, self).__init__()
        # Project z concatenated with the label to a 4x4x256 tensor.
        self.model = nn.Sequential(
            nn.Linear(z_dim + 10, 4 * 4 * 256),
            nn.LeakyReLU()
        )

        # Three transposed convs upsample the map, then a 3x3 conv emits RGB.
        up = []
        for c_in, c_out, pad, out_pad in ((256, 128, 0, 0), (128, 64, 1, 0), (64, 64, 2, 1)):
            up += [nn.ConvTranspose2d(c_in, c_out, 3, stride=2, padding=pad, output_padding=out_pad),
                   nn.LeakyReLU()]
        up += [nn.Conv2d(64, 3, 3, stride=1, padding=1), nn.Tanh()]
        self.cnn = nn.Sequential(*up)
项目:Machine-Learning    作者:hadikazemi    | 项目源码 | 文件源码
def __init__(self):
        """Discriminator: three conv+pool stages, then an MLP to one probability."""
        super(Discriminator, self).__init__()
        # Each stage: 3x3 conv (same padding), LeakyReLU, 2x2 max-pool.
        feat = []
        for c_in, c_out in ((3, 64), (64, 128), (128, 256)):
            feat += [nn.Conv2d(c_in, c_out, 3, stride=1, padding=1),
                     nn.LeakyReLU(),
                     nn.MaxPool2d(2, 2)]
        self.cnn = nn.Sequential(*feat)
        # Flattened 4x4x256 features -> sigmoid probability.
        self.fc = nn.Sequential(
            nn.Linear(4 * 4 * 256, 128),
            nn.LeakyReLU(),
            nn.Dropout(0.5),
            nn.Linear(128, 1),
            nn.Sigmoid()
        )
项目:Machine-Learning    作者:hadikazemi    | 项目源码 | 文件源码
def __init__(self):
        """Auxiliary network Q: conv feature extractor plus a 10-way classifier."""
        super(Q, self).__init__()
        # Three conv+pool stages reduce the input to a 4x4x64 feature map.
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 16, 3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(16, 32, 3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 64, 3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2)
        )
        self.fc = nn.Sequential(
            nn.Linear(4 * 4 * 64, 128),
            nn.LeakyReLU(),
            # BUG FIX: this layer receives the 128-dim output of the previous
            # Linear, not the raw 4*4*64 conv features; the original
            # nn.Linear(4*4*64, 10) crashed on shape mismatch at runtime.
            nn.Linear(128, 10),
            # dim=1 matches the legacy implicit default for 2-D input.
            nn.Softmax(dim=1)
        )
项目:Machine-Learning    作者:hadikazemi    | 项目源码 | 文件源码
def __init__(self):
        """Generator: 100-d noise -> 4x4x256 projection -> RGB image in [-1, 1]."""
        super(Generator, self).__init__()
        # Linear projection of the noise vector to a 4x4x256 tensor.
        self.model = nn.Sequential(
            nn.Linear(100, 4 * 4 * 256),
            nn.LeakyReLU()
        )

        # Upsample with three transposed convs, then map to 3 channels.
        decode = []
        for c_in, c_out, pad, out_pad in ((256, 128, 0, 0), (128, 64, 1, 0), (64, 64, 2, 1)):
            decode += [nn.ConvTranspose2d(c_in, c_out, 3, stride=2, padding=pad, output_padding=out_pad),
                       nn.LeakyReLU()]
        decode += [nn.Conv2d(64, 3, 3, stride=1, padding=1), nn.Tanh()]
        self.cnn = nn.Sequential(*decode)
项目:Machine-Learning    作者:hadikazemi    | 项目源码 | 文件源码
def __init__(self):
        """Discriminator: conv/pool pyramid followed by a sigmoid MLP head."""
        super(Discriminator, self).__init__()
        pyramid = []
        # 3x3 same-padding convs with 2x2 pooling: 3 -> 64 -> 128 -> 256 channels.
        for c_in, c_out in ((3, 64), (64, 128), (128, 256)):
            pyramid += [nn.Conv2d(c_in, c_out, 3, stride=1, padding=1),
                        nn.LeakyReLU(),
                        nn.MaxPool2d(2, 2)]
        self.cnn = nn.Sequential(*pyramid)
        # 4x4x256 flattened features -> dropout-regularised probability head.
        self.fc = nn.Sequential(
            nn.Linear(4 * 4 * 256, 128),
            nn.LeakyReLU(),
            nn.Dropout(0.5),
            nn.Linear(128, 1),
            nn.Sigmoid()
        )
项目:MNIST-invert-color    作者:BlackBindy    | 项目源码 | 文件源码
def __init__(self):
        """Small encoder-decoder mapping a 1x28x28 image to a 1x28x28 output."""
        super(Generator, self).__init__()
        # Downsample: 28x28 -> 14x14.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(16),
            nn.LeakyReLU()
        )
        # Same-resolution refinement: 14x14 -> 14x14.
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(16),
            nn.LeakyReLU()
        )
        # Upsample back to 28x28, squashed to [-1, 1].
        self.layer3 = nn.Sequential(
            nn.ConvTranspose2d(16, 1, kernel_size=4, stride=2, padding=1),
            nn.Tanh()
        )
项目:pytorch-cns    作者:awentzonline    | 项目源码 | 文件源码
def __init__(self):
        """DCGAN-style encoder: (nc) x 64 x 64 image -> nz-dim code in [-1, 1]."""
        super(Encoder, self).__init__()
        stages = [
            # (nc) x 64 x 64 -> (ndf) x 32 x 32
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Three more stride-2 stages doubling channels; batch norm was
        # deliberately disabled in the original and is omitted here too.
        for m in (1, 2, 4):
            stages += [
                nn.Conv2d(ndf * m, ndf * m * 2, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # (ndf*8) x 4 x 4 -> nz x 1 x 1, squashed by tanh.
        stages += [
            nn.Conv2d(ndf * 8, nz, 4, 1, 0, bias=False),
            nn.Tanh(),
        ]
        self.main = nn.Sequential(*stages)
项目:pytorch-cns    作者:awentzonline    | 项目源码 | 文件源码
def __init__(self, input_shape, base_filters, num_hidden, num_actions):
        """Conv feature extractor followed by a 2-layer softmax policy head.

        Args:
            input_shape: (channels, height, width) of the observation.
            base_filters: width of the first conv stage.
            num_hidden: hidden units in the classifier MLP.
            num_actions: size of the output action distribution.
        """
        super(CNN, self).__init__()
        # (Removed unused local num_input = int(np.prod(input_shape)).)
        self.convs = nn.Sequential(
            # Stage 1: kernel 8, stride 4.
            nn.Conv2d(input_shape[0], base_filters, 8, 4, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # Stage 2: kernel 4, stride 2.
            nn.Conv2d(base_filters, base_filters * 2, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # Stage 3: kernel 3, stride 1 (spatial size preserved).
            nn.Conv2d(base_filters * 2, base_filters * 2, 3, 1, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        )
        # Assumes the conv output flattens to base_filters*2 x 10 x 10 --
        # TODO confirm against the expected input resolution.
        self.classifier = nn.Sequential(
            nn.Linear(base_filters * 2 * 10 * 10, num_hidden),
            nn.ReLU(),
            nn.Linear(num_hidden, num_actions),
            # dim=1 matches the legacy implicit default for 2-D input.
            nn.Softmax(dim=1)
        )
项目:pytorch-cns    作者:awentzonline    | 项目源码 | 文件源码
def __init__(self, ngpu):
        """DCGAN discriminator without batch norm (disabled in the original)."""
        super(NetD, self).__init__()
        self.ngpu = ngpu
        stages = [
            # (nc) x 64 x 64 -> (ndf) x 32 x 32
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Three more stride-2 stages, doubling channels; no normalisation.
        for m in (1, 2, 4):
            stages += [
                nn.Conv2d(ndf * m, ndf * m * 2, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # (ndf*8) x 4 x 4 -> 1 x 1 x 1 probability.
        stages += [
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*stages)
项目:pytorch-cns    作者:awentzonline    | 项目源码 | 文件源码
def __init__(self, input_shape, base_filters, num_hidden, num_actions):
        """Conv feature extractor -> RNN -> softmax policy head.

        Args:
            input_shape: (channels, height, width) of the observation.
            base_filters: width of the first conv stage.
            num_hidden: RNN hidden-state size.
            num_actions: size of the output action distribution.
        """
        super(CNN, self).__init__()
        # (Removed unused local num_input = int(np.prod(input_shape)).)
        self.num_hidden = num_hidden
        # Three stride-2 conv stages (kernel 5, padding 1), no batch norm.
        self.convs = nn.Sequential(
            nn.Conv2d(input_shape[0], base_filters, 5, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(base_filters, base_filters * 2, 5, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(base_filters * 2, base_filters * 2, 5, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        )
        # for p in self.convs.parameters():
        #     p.requires_grad = False  # use random conv features
        #self.convs.apply(weights_init)
        # Assumes the convs yield 11x11 feature maps for the expected input
        # size -- TODO confirm against the caller.
        self.conv_out_size = base_filters * 2 * 11 * 11
        self.rnn = nn.RNN(self.conv_out_size, self.num_hidden, batch_first=True)
        self.classifier = nn.Sequential(
            nn.Linear(num_hidden, num_actions),
            # dim=1 matches the legacy implicit default for 2-D input.
            nn.Softmax(dim=1)
        )
项目:age    作者:ly015    | 项目源码 | 文件源码
def __init__(self, opts):
        """MLP generator mapping (cnn feature, noise) -> cnn feature.

        Layer sizes are [feat+noise] + opts.G_hidden + [feat]; every fc
        layer except the last is followed by BatchNorm1d and the activation
        selected by opts.G_nonlinear ('elu' or 'lrelu').
        """
        super(Generator, self).__init__()

        feat_sizes = {'resnet18': 512, 'resnet50': 2048, 'vgg16': 2048}
        self.cnn_feat_size = feat_sizes[opts.cnn]
        self.noise_dim = opts.noise_dim

        dims = [self.cnn_feat_size + self.noise_dim] + opts.G_hidden + [self.cnn_feat_size]
        modules = OrderedDict()
        if opts.input_relu == 1:
            modules['relu'] = nn.ReLU()

        last = len(dims) - 2  # index of the final (un-normalized) fc layer
        for idx in range(last + 1):
            modules['fc%d' % idx] = nn.Linear(dims[idx], dims[idx + 1], bias=False)
            if idx == last:
                continue  # no norm/activation after the output layer
            modules['bn%d' % idx] = nn.BatchNorm1d(dims[idx + 1])
            if opts.G_nonlinear == 'elu':
                modules['elu%d' % idx] = nn.ELU()
            elif opts.G_nonlinear == 'lrelu':
                modules['leaky_relu%d' % idx] = nn.LeakyReLU(0.2)

        self.net = nn.Sequential(modules)
项目:age    作者:ly015    | 项目源码 | 文件源码
def __init__(self, opts):
        """MLP generator mapping (two cnn features, noise) -> cnn feature.

        Identical layout to the plain generator except the input holds two
        concatenated cnn features: sizes are [2*feat+noise] + opts.G_hidden
        + [feat], with BatchNorm1d plus the opts.G_nonlinear activation
        after every fc layer but the last.
        """
        super(ID_Generator, self).__init__()

        feat_sizes = {'resnet18': 512, 'resnet50': 2048, 'vgg16': 2048}
        self.cnn_feat_size = feat_sizes[opts.cnn]
        self.noise_dim = opts.noise_dim

        dims = [self.cnn_feat_size * 2 + self.noise_dim] + opts.G_hidden + [self.cnn_feat_size]
        modules = OrderedDict()
        if opts.input_relu == 1:
            modules['relu'] = nn.ReLU()

        last = len(dims) - 2  # index of the final (un-normalized) fc layer
        for idx in range(last + 1):
            modules['fc%d' % idx] = nn.Linear(dims[idx], dims[idx + 1], bias=False)
            if idx == last:
                continue  # no norm/activation after the output layer
            modules['bn%d' % idx] = nn.BatchNorm1d(dims[idx + 1])
            if opts.G_nonlinear == 'elu':
                modules['elu%d' % idx] = nn.ELU()
            elif opts.G_nonlinear == 'lrelu':
                modules['leaky_relu%d' % idx] = nn.LeakyReLU(0.2)

        self.net = nn.Sequential(modules)
项目:age    作者:ly015    | 项目源码 | 文件源码
def __init__(self, opts):
        """MLP discriminator: cnn feature -> sigmoid realness score.

        Layer sizes are [feat] + opts.D_hidden + [1]; each hidden fc layer
        is followed by BatchNorm1d + LeakyReLU(0.2), and the scalar output
        passes through a Sigmoid.
        """
        super(Discriminator, self).__init__()

        feat_sizes = {'resnet18': 512, 'resnet50': 2048, 'vgg16': 2048}
        self.cnn_feat_size = feat_sizes[opts.cnn]

        dims = [self.cnn_feat_size] + opts.D_hidden + [1]
        modules = OrderedDict()
        if opts.input_relu == 1:
            modules['relu'] = nn.ReLU()

        last = len(dims) - 2  # index of the output fc layer
        for idx in range(last + 1):
            modules['fc%d' % idx] = nn.Linear(dims[idx], dims[idx + 1], bias=False)
            if idx < last:  # hidden layers get norm + activation
                modules['bn%d' % idx] = nn.BatchNorm1d(dims[idx + 1])
                modules['leaky_relu%d' % idx] = nn.LeakyReLU(0.2)
        modules['sigmoid'] = nn.Sigmoid()

        self.net = nn.Sequential(modules)
项目:age    作者:ly015    | 项目源码 | 文件源码
def __init__(self, opts, fix_decoder = True):
        """DCGAN-style discriminator operating on decoded images.

        Wraps a pretrained decoder (checkpoint loaded from
        models/<opts.decoder_id>/best.pth) plus a conv stack that maps a
        6-channel input (presumably two RGB images concatenated along the
        channel axis -- TODO confirm against the forward pass) to a single
        sigmoid probability.
        """
        super(MD_Discriminator, self).__init__()

        # Pretrained feature->image decoder; weights come from the checkpoint.
        self.decoder = decoder_model.DecoderModel(fn = 'models/%s/best.pth' % opts.decoder_id)

        # 6 -> 64 -> 128 -> 256 -> 512 channels; every 4x4 stride-2 conv
        # halves the spatial size, and the final 7x7 valid conv collapses
        # the map to a 1x1 score.
        # NOTE(review): BatchNorm is placed *after* LeakyReLU here (unusual
        # order, and the first conv has no norm at all) -- confirm intended.
        self.discriminator = nn.Sequential(
            nn.Conv2d(6, 64, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 256, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(256),
            nn.Conv2d(256, 512, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(512),
            nn.Conv2d(512, 1, 7, 1, 0, bias = False),
            nn.Sigmoid()
            )

        # Flag read by the training code; when True the wrapped decoder is
        # treated as frozen (its parameters are not updated).
        self.is_decoder_fixed = fix_decoder
项目:age    作者:ly015    | 项目源码 | 文件源码
def __init__(self, opts, fix_decoder = True):
        """DCGAN-style discriminator over a single decoded RGB image.

        Same layout as the pair variant but with a 3-channel input.  Wraps
        a pretrained decoder (checkpoint from models/<opts.decoder_id>/best.pth)
        and maps the image to a single sigmoid probability.
        """
        super(D_Discriminator, self).__init__()

        # Pretrained feature->image decoder; weights come from the checkpoint.
        self.decoder = decoder_model.DecoderModel(fn = 'models/%s/best.pth' % opts.decoder_id)

        # 3 -> 64 -> 128 -> 256 -> 512 channels; 4x4 stride-2 convs halve the
        # spatial size, then a 7x7 valid conv collapses the map to 1x1.
        # NOTE(review): BatchNorm follows LeakyReLU here (unusual order) --
        # confirm intended.
        self.discriminator = nn.Sequential(
            nn.Conv2d(3, 64, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 256, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(256),
            nn.Conv2d(256, 512, 4, 2, 1, bias = False),
            nn.LeakyReLU(0.2),
            nn.BatchNorm2d(512),
            nn.Conv2d(512, 1, 7, 1, 0, bias = False),
            nn.Sigmoid()
        )

        # Put the decoder in eval mode (affects e.g. BatchNorm/Dropout).
        self.decoder.eval()
        # Flag read by the training code; when True the decoder stays frozen.
        self.is_decoder_fixed = fix_decoder
项目:superres    作者:ntomita    | 项目源码 | 文件源码
def make_layers(nopts):
    """Assemble an ``nn.Sequential`` from a parallel-list layer spec.

    ``nopts`` maps option names to per-layer lists indexed together:
    ``layer_type[i]`` is one of ``'conv'`` (3x3-style conv with "same"
    padding ``(k-1)//2``), ``'lrelu'`` (default LeakyReLU), or ``'bn'``
    (BatchNorm2d over ``num_filters[i]`` channels).  ``input_channels``
    seeds the channel count, which tracks the most recent conv/bn entry.
    """
    modules = []
    in_ch = nopts['input_channels']
    for idx, kind in enumerate(nopts['layer_type']):
        if kind == 'conv':
            out_ch = nopts['num_filters'][idx]
            ksize = nopts['kernel_size'][idx]
            modules.append(nn.Conv2d(in_ch, out_ch, ksize,
                                     nopts['stride'][idx],
                                     (ksize - 1) // 2))
            in_ch = out_ch
        elif kind == 'lrelu':
            modules.append(nn.LeakyReLU())
        elif kind == 'bn':
            out_ch = nopts['num_filters'][idx]
            modules.append(nn.BatchNorm2d(out_ch))
            in_ch = out_ch
    return nn.Sequential(*modules)
项目:python-utils    作者:zhijian-liu    | 项目源码 | 文件源码
def __init__(self, num_layers, in_channels = 3, out_channels = 8, batch_norm = True):
        """2-D conv encoder.

        Builds ``num_layers[0]`` downsampling stages (3x3 conv, optional
        BatchNorm2d, LeakyReLU, 2x2 max-pool; channels double each stage)
        followed by ``num_layers[1]`` resolution-preserving 1x1 conv stages.
        Passing a plain int means that many downsampling stages and no 1x1
        stages.
        """
        super(ConvEncoder2D, self).__init__()

        # an int means "that many 3x3 stages, no 1x1 stages"
        if isinstance(num_layers, int):
            num_layers = [num_layers, 0]

        modules = []

        # downsampling stages: 3x3 conv (+BN) + LeakyReLU + 2x2 max-pool
        for _ in range(num_layers[0]):
            modules.append(nn.Conv2d(in_channels, out_channels, 3, padding = 1))
            if batch_norm:
                modules.append(nn.BatchNorm2d(out_channels))
            modules.extend([nn.LeakyReLU(0.2, True), nn.MaxPool2d(2)])
            # double the channel count for the next stage
            in_channels, out_channels = out_channels, out_channels * 2

        # channel-mixing stages: 1x1 conv (+BN) + LeakyReLU, resolution kept
        for _ in range(num_layers[1]):
            modules.append(nn.Conv2d(in_channels, in_channels, 1))
            if batch_norm:
                modules.append(nn.BatchNorm2d(in_channels))
            modules.append(nn.LeakyReLU(0.2, True))

        # assemble and initialize weights with the project-wide initializer
        self.network = nn.Sequential(*modules)
        self.network.apply(weights_init)
项目:python-utils    作者:zhijian-liu    | 项目源码 | 文件源码
def __init__(self, num_layers, in_channels = 3, out_channels = 8, batch_norm = True):
        """3-D conv encoder (volumetric twin of the 2-D version).

        Builds ``num_layers[0]`` downsampling stages (3x3x3 conv, optional
        BatchNorm3d, LeakyReLU, 2x2x2 max-pool; channels double each stage)
        followed by ``num_layers[1]`` resolution-preserving 1x1x1 conv
        stages.  Passing a plain int means that many downsampling stages
        and no 1x1 stages.
        """
        super(ConvEncoder3D, self).__init__()

        # an int means "that many 3x3 stages, no 1x1 stages"
        if isinstance(num_layers, int):
            num_layers = [num_layers, 0]

        modules = []

        # downsampling stages: 3x3x3 conv (+BN) + LeakyReLU + 2x2x2 max-pool
        for _ in range(num_layers[0]):
            modules.append(nn.Conv3d(in_channels, out_channels, 3, padding = 1))
            if batch_norm:
                modules.append(nn.BatchNorm3d(out_channels))
            modules.extend([nn.LeakyReLU(0.2, True), nn.MaxPool3d(2)])
            # double the channel count for the next stage
            in_channels, out_channels = out_channels, out_channels * 2

        # channel-mixing stages: 1x1x1 conv (+BN) + LeakyReLU, resolution kept
        for _ in range(num_layers[1]):
            modules.append(nn.Conv3d(in_channels, in_channels, 1))
            if batch_norm:
                modules.append(nn.BatchNorm3d(in_channels))
            modules.append(nn.LeakyReLU(0.2, True))

        # assemble and initialize weights with the project-wide initializer
        self.network = nn.Sequential(*modules)
        self.network.apply(weights_init)
项目:PaintsPytorch    作者:orashi    | 项目源码 | 文件源码
def __init__(self, ndf=64):
        """Patch-style discriminator over a 4-channel input.

        Alternates Tunnel blocks (project-local residual groups; the first
        argument looks like a depth count -- TODO confirm) with stride-2
        DResNeXtBottleneck downsampling stages.  The trailing ``# N``
        comments track the expected spatial resolution.  The final 4x4
        valid conv reduces the 4x4, ndf*32-channel map to a single
        un-activated score (no sigmoid -- presumably a hinge/WGAN-style
        objective; confirm against the training code).
        """
        super(def_netD, self).__init__()

        sequence = [
            nn.Conv2d(4, ndf, kernel_size=4, stride=2, padding=1, bias=False),  # 128
            nn.LeakyReLU(0.2, True),

            Tunnel(1, ndf, ndf),
            DResNeXtBottleneck(ndf, ndf * 2, 2),  # 64

            Tunnel(2, ndf * 2, ndf * 2),
            DResNeXtBottleneck(ndf * 2, ndf * 4, 2),  # 32

            Tunnel(3, ndf * 4, ndf * 4),
            DResNeXtBottleneck(ndf * 4, ndf * 8, 2),  # 16

            Tunnel(4, ndf * 8, ndf * 8),
            DResNeXtBottleneck(ndf * 8, ndf * 16, 2),  # 8

            Tunnel(2, ndf * 16, ndf * 16),
            DResNeXtBottleneck(ndf * 16, ndf * 32, 2),  # 4

            nn.Conv2d(ndf * 32, 1, kernel_size=4, stride=1, padding=0, bias=False)

        ]

        self.model = nn.Sequential(*sequence)
项目:PaintsPytorch    作者:orashi    | 项目源码 | 文件源码
def __init__(self, ndf=64):
        """Two-stage discriminator built from ResNeXt-style bottlenecks.

        Stage 1 (``self.model``) encodes a 1-channel input with one
        stride-2 conv plus three stride-2 bottlenecks (16x total
        downsampling) into ndf*4 feature maps.  Stage 2 (``self.prototype``)
        consumes those features concatenated with an extra 3-channel map
        (ndf*4 + 3 input channels -- presumably a downsized color image;
        TODO confirm against the forward pass, which is not visible here),
        downsamples three more times, and collapses to a 1x1 map with a 4x4
        valid conv.  ``self.out`` maps the flattened feature to a single
        un-activated score.
        """
        super(def_netD, self).__init__()

        sequence = [
            nn.Conv2d(1, ndf, kernel_size=4, stride=2, padding=1, bias=False),  # 256
            nn.LeakyReLU(0.2, True),

            ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1, stride=2),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=False),  # 128
            ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1, stride=2),
            nn.Conv2d(ndf * 2, ndf * 4, kernel_size=1, stride=1, padding=0, bias=False),  # 64
            ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1, stride=2),
            # nn.Conv2d(ndf * 4, ndf * 8, kernel_size=1, stride=1, padding=0, bias=False),  # 32
            # ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            # ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 16
        ]

        self.model = nn.Sequential(*sequence)

        # Stage 2: fuse encoder features with the extra 3 input channels.
        sequence = [
            nn.Conv2d(ndf * 4 + 3, ndf * 8, kernel_size=3, stride=1, padding=1, bias=False),  # 32
            nn.LeakyReLU(0.2, True),

            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 16
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 8
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 4
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            nn.Conv2d(ndf * 8, ndf * 8, kernel_size=4, stride=1, padding=0, bias=False),  # 1
            nn.LeakyReLU(0.2, True),

        ]

        self.prototype = nn.Sequential(*sequence)

        # NOTE(review): Linear(512, 1) hard-codes ndf * 8 == 512, i.e. the
        # default ndf=64; other ndf values would break at this layer.
        self.out = nn.Linear(512, 1)
项目:PaintsPytorch    作者:orashi    | 项目源码 | 文件源码
def __init__(self, ndf=64):
        """Patch-style discriminator over a 4-channel input.

        Alternates Tunnel blocks (project-local residual groups; the first
        argument looks like a depth count -- TODO confirm) with stride-2
        DResNeXtBottleneck downsampling stages.  The trailing ``# N``
        comments track the expected spatial resolution.  The final 4x4
        valid conv reduces the 4x4, ndf*32-channel map to a single
        un-activated score (no sigmoid -- presumably a hinge/WGAN-style
        objective; confirm against the training code).
        """
        super(def_netD, self).__init__()

        sequence = [
            nn.Conv2d(4, ndf, kernel_size=4, stride=2, padding=1, bias=False),  # 128
            nn.LeakyReLU(0.2, True),

            Tunnel(1, ndf, ndf),
            DResNeXtBottleneck(ndf, ndf * 2, 2),  # 64

            Tunnel(2, ndf * 2, ndf * 2),
            DResNeXtBottleneck(ndf * 2, ndf * 4, 2),  # 32

            Tunnel(3, ndf * 4, ndf * 4),
            DResNeXtBottleneck(ndf * 4, ndf * 8, 2),  # 16

            Tunnel(4, ndf * 8, ndf * 8),
            DResNeXtBottleneck(ndf * 8, ndf * 16, 2),  # 8

            Tunnel(2, ndf * 16, ndf * 16),
            DResNeXtBottleneck(ndf * 16, ndf * 32, 2),  # 4

            nn.Conv2d(ndf * 32, 1, kernel_size=4, stride=1, padding=0, bias=False)

        ]

        self.model = nn.Sequential(*sequence)
项目:PaintsPytorch    作者:orashi    | 项目源码 | 文件源码
def __init__(self, ndf, norm_layer=nn.BatchNorm2d):
        """PatchGAN-style discriminator over a 4-channel input.

        One un-normalized stride-2 stage, two normalized stride-2 stages
        doubling the channels each time (ndf -> ndf*4), one normalized
        stride-1 stage to ndf*8, then a 1-channel patch prediction map.
        ``norm_layer`` follows every conv except the first and the output.
        """
        super(NLayerDiscriminator, self).__init__()

        kw, padw = 4, 1
        self.ndf = ndf

        layers = [
            nn.Conv2d(4, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True),
        ]

        # two stride-2 stages, each doubling the channel count
        for mult in (1, 2):
            layers += [
                nn.Conv2d(ndf * mult, ndf * mult * 2,
                          kernel_size=kw, stride=2, padding=padw),
                norm_layer(ndf * mult * 2),
                nn.LeakyReLU(0.2, True),
            ]

        # final stride-1 stage plus the 1-channel patch prediction
        layers += [
            nn.Conv2d(ndf * 4, ndf * 8,
                      kernel_size=kw, stride=1, padding=padw),
            norm_layer(ndf * 8),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 8, 1, kernel_size=kw, stride=1, padding=padw),
        ]

        self.model = nn.Sequential(*layers)

        # project-wide weight initialization
        LR_weight_init(self)
项目:PaintsPytorch    作者:orashi    | 项目源码 | 文件源码
def __init__(self, ndf=64):
        """Two-stage discriminator built from ResNeXt-style bottlenecks.

        Stage 1 (``self.model``) encodes a 1-channel input with one
        stride-2 conv plus three stride-2 bottlenecks (16x total
        downsampling) into ndf*4 feature maps.  Stage 2 (``self.prototype``)
        consumes those features concatenated with an extra 3-channel map
        (ndf*4 + 3 input channels -- presumably a downsized color image;
        TODO confirm against the forward pass, which is not visible here),
        downsamples three more times, and collapses to a 1x1 map with a 4x4
        valid conv.  ``self.out`` maps the flattened feature to a single
        un-activated score.
        """
        super(def_netD, self).__init__()

        sequence = [
            nn.Conv2d(1, ndf, kernel_size=4, stride=2, padding=1, bias=False),  # 256
            nn.LeakyReLU(0.2, True),

            ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1, stride=2),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=False),  # 128
            ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1, stride=2),
            nn.Conv2d(ndf * 2, ndf * 4, kernel_size=1, stride=1, padding=0, bias=False),  # 64
            ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1, stride=2),
            # nn.Conv2d(ndf * 4, ndf * 8, kernel_size=1, stride=1, padding=0, bias=False),  # 32
            # ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            # ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 16
        ]

        self.model = nn.Sequential(*sequence)

        # Stage 2: fuse encoder features with the extra 3 input channels.
        sequence = [
            nn.Conv2d(ndf * 4 + 3, ndf * 8, kernel_size=3, stride=1, padding=1, bias=False),  # 32
            nn.LeakyReLU(0.2, True),

            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 16
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 8
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 4
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            nn.Conv2d(ndf * 8, ndf * 8, kernel_size=4, stride=1, padding=0, bias=False),  # 1
            nn.LeakyReLU(0.2, True),

        ]

        self.prototype = nn.Sequential(*sequence)

        # NOTE(review): Linear(512, 1) hard-codes ndf * 8 == 512, i.e. the
        # default ndf=64; other ndf values would break at this layer.
        self.out = nn.Linear(512, 1)
项目:PaintsPytorch    作者:orashi    | 项目源码 | 文件源码
def __init__(self, ndf=64):
        """Two-stage discriminator built from ResNeXt-style bottlenecks.

        Stage 1 (``self.model``) encodes a 1-channel input with one
        stride-2 conv plus three stride-2 bottlenecks (16x total
        downsampling) into ndf*4 feature maps.  Stage 2 (``self.prototype``)
        consumes those features concatenated with an extra 3-channel map
        (ndf*4 + 3 input channels -- presumably a downsized color image;
        TODO confirm against the forward pass, which is not visible here),
        downsamples three more times, and collapses to a 1x1 map with a 4x4
        valid conv.  ``self.out`` maps the flattened feature to a single
        un-activated score.
        """
        super(def_netD, self).__init__()

        sequence = [
            nn.Conv2d(1, ndf, kernel_size=4, stride=2, padding=1, bias=False),  # 256
            nn.LeakyReLU(0.2, True),

            ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1, stride=2),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=False),  # 128
            ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1, stride=2),
            nn.Conv2d(ndf * 2, ndf * 4, kernel_size=1, stride=1, padding=0, bias=False),  # 64
            ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1, stride=2),
            # nn.Conv2d(ndf * 4, ndf * 8, kernel_size=1, stride=1, padding=0, bias=False),  # 32
            # ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            # ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 16
        ]

        self.model = nn.Sequential(*sequence)

        # Stage 2: fuse encoder features with the extra 3 input channels.
        sequence = [
            nn.Conv2d(ndf * 4 + 3, ndf * 8, kernel_size=3, stride=1, padding=1, bias=False),  # 32
            nn.LeakyReLU(0.2, True),

            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 16
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 8
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 4
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            nn.Conv2d(ndf * 8, ndf * 8, kernel_size=4, stride=1, padding=0, bias=False),  # 1
            nn.LeakyReLU(0.2, True),

        ]

        self.prototype = nn.Sequential(*sequence)

        # NOTE(review): Linear(512, 1) hard-codes ndf * 8 == 512, i.e. the
        # default ndf=64; other ndf values would break at this layer.
        self.out = nn.Linear(512, 1)
项目:PaintsPytorch    作者:orashi    | 项目源码 | 文件源码
def __init__(self, ndf=64):
        """Two-stage discriminator built from ResNeXt-style bottlenecks.

        Stage 1 (``self.model``) encodes a 1-channel input with one
        stride-2 conv plus three stride-2 bottlenecks (16x total
        downsampling) into ndf*4 feature maps.  Stage 2 (``self.prototype``)
        consumes those features concatenated with an extra 3-channel map
        (ndf*4 + 3 input channels -- presumably a downsized color image;
        TODO confirm against the forward pass, which is not visible here),
        downsamples three more times, and collapses to a 1x1 map with a 4x4
        valid conv.  ``self.out`` maps the flattened feature to a single
        un-activated score.
        """
        super(def_netD, self).__init__()

        sequence = [
            nn.Conv2d(1, ndf, kernel_size=4, stride=2, padding=1, bias=False),  # 256
            nn.LeakyReLU(0.2, True),

            ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf, ndf, cardinality=8, dilate=1, stride=2),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=False),  # 128
            ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 2, ndf * 2, cardinality=8, dilate=1, stride=2),
            nn.Conv2d(ndf * 2, ndf * 4, kernel_size=1, stride=1, padding=0, bias=False),  # 64
            ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 4, ndf * 4, cardinality=8, dilate=1, stride=2),
            # nn.Conv2d(ndf * 4, ndf * 8, kernel_size=1, stride=1, padding=0, bias=False),  # 32
            # ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            # ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 16
        ]

        self.model = nn.Sequential(*sequence)

        # Stage 2: fuse encoder features with the extra 3 input channels.
        sequence = [
            nn.Conv2d(ndf * 4 + 3, ndf * 8, kernel_size=3, stride=1, padding=1, bias=False),  # 32
            nn.LeakyReLU(0.2, True),

            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 16
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 8
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1, stride=2),  # 4
            ResNeXtBottleneck(ndf * 8, ndf * 8, cardinality=8, dilate=1),
            nn.Conv2d(ndf * 8, ndf * 8, kernel_size=4, stride=1, padding=0, bias=False),  # 1
            nn.LeakyReLU(0.2, True),

        ]

        self.prototype = nn.Sequential(*sequence)

        # NOTE(review): Linear(512, 1) hard-codes ndf * 8 == 512, i.e. the
        # default ndf=64; other ndf values would break at this layer.
        self.out = nn.Linear(512, 1)