Python torch.nn module: ELU usage examples (source code)

The following 18 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.ELU.
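
Before the project extracts, here is a minimal stand-alone sketch of nn.ELU itself (not taken from any of the projects below). Note that the first positional argument of nn.ELU is alpha, not inplace, so nn.ELU(True) in several snippets below sets alpha=True (numerically 1.0) rather than enabling in-place computation.

import torch
import torch.nn as nn

# ELU(x) = x                      for x > 0
#        = alpha * (exp(x) - 1)   for x <= 0
elu = nn.ELU(alpha=1.0)                 # default alpha
y = elu(torch.randn(4, 8))

# the in-place variant must be requested by keyword
elu_inplace = nn.ELU(alpha=1.0, inplace=True)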

Project: inferno    Author: inferno-pytorch
def _make_test_model():
        import torch.nn as nn
        from inferno.extensions.layers.reshape import AsMatrix

        toy_net = nn.Sequential(nn.Conv2d(3, 128, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(128, 128, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(128, 256, 3, 1, 1),
                                nn.ELU(),
                                nn.AdaptiveAvgPool2d((1, 1)),
                                AsMatrix(),
                                nn.Linear(256, 10),
                                nn.Softmax())
        return toy_net
Project: CBEGAN    Author: taey16
def __init__(self, nc, ndf, hidden_size):
    super(Encoder, self).__init__()

    # 256
    self.conv1 = nn.Sequential(nn.Conv2d(nc,ndf,kernel_size=3,stride=1,padding=1),
                               nn.ELU(True))
    # 256
    self.conv2 = conv_block(ndf, ndf)
    # 128
    self.conv3 = conv_block(ndf, ndf*2)
    # 64
    self.conv4 = conv_block(ndf*2, ndf*3)
    # 32
    self.conv5 = conv_block(ndf*3, ndf*4)
    # 16
    #self.conv6 = conv_block(ndf*4, ndf*4)
    # 8
    self.encode = nn.Conv2d(ndf*4, hidden_size, kernel_size=8,stride=1,padding=0)
    # 1
Project: CBEGAN    Author: taey16
def __init__(self, nc, ngf, hidden_size, condition=False, condition_size=0):
    super(Decoder, self).__init__()
    self.condition = condition

    self.decode_cond = nn.ConvTranspose2d(condition_size, ngf, kernel_size=8,stride=1,padding=0)
    # 1
    self.decode = nn.ConvTranspose2d(hidden_size, ngf, kernel_size=8,stride=1,padding=0)
    # 8
    self.dconv6 = deconv_block(ngf*2, ngf)
    # 16
    self.dconv5 = deconv_block(ngf, ngf)
    # 32
    self.dconv4 = deconv_block(ngf, ngf)
    # 64 
    self.dconv3 = deconv_block(ngf, ngf)
    # 128 
    #self.dconv2 = deconv_block(ngf, ngf)
    # 256
    self.dconv1 = nn.Sequential(nn.Conv2d(ngf,ngf,kernel_size=3,stride=1,padding=1),
                                nn.ELU(True),
                                nn.Conv2d(ngf,ngf,kernel_size=3,stride=1,padding=1),
                                nn.ELU(True),
                                nn.Conv2d(ngf, nc,kernel_size=3, stride=1,padding=1),
                                nn.Tanh())
Project: age    Author: ly015
def __init__(self, opts):

        super(Generator, self).__init__()

        cnn_feat_map = {'resnet18': 512, 'resnet50': 2048, 'vgg16': 2048}
        self.cnn_feat_size = cnn_feat_map[opts.cnn]
        self.noise_dim = opts.noise_dim


        hidden_lst = [self.cnn_feat_size + self.noise_dim] + opts.G_hidden + [self.cnn_feat_size]
        layers = OrderedDict()
        if opts.input_relu== 1:
            layers['relu'] = nn.ReLU()
        for n, (dim_in, dim_out) in enumerate(zip(hidden_lst, hidden_lst[1::])):
            layers['fc%d' % n] = nn.Linear(dim_in, dim_out, bias = False)
            if n < len(hidden_lst) - 2:
                layers['bn%d' % n] = nn.BatchNorm1d(dim_out)
                if opts.G_nonlinear == 'elu':
                    layers['elu%d' % n] = nn.ELU()
                elif opts.G_nonlinear == 'lrelu':
                    layers['leaky_relu%d'%n] = nn.LeakyReLU(0.2)


        self.net = nn.Sequential(layers)
Project: age    Author: ly015
def __init__(self, opts):

        super(ID_Generator, self).__init__()

        cnn_feat_map = {'resnet18': 512, 'resnet50': 2048, 'vgg16': 2048}
        self.cnn_feat_size = cnn_feat_map[opts.cnn]
        self.noise_dim = opts.noise_dim


        hidden_lst = [self.cnn_feat_size*2 + self.noise_dim] + opts.G_hidden + [self.cnn_feat_size]
        layers = OrderedDict()
        if opts.input_relu== 1:
            layers['relu'] = nn.ReLU()
        for n, (dim_in, dim_out) in enumerate(zip(hidden_lst, hidden_lst[1::])):
            layers['fc%d' % n] = nn.Linear(dim_in, dim_out, bias = False)
            if n < len(hidden_lst) - 2:
                layers['bn%d' % n] = nn.BatchNorm1d(dim_out)
                if opts.G_nonlinear == 'elu':
                    layers['elu%d' % n] = nn.ELU()
                elif opts.G_nonlinear == 'lrelu':
                    layers['leaky_relu%d'%n] = nn.LeakyReLU(0.2)


        self.net = nn.Sequential(layers)
Project: inferno    Author: inferno-pytorch
def _make_test_model():
        toy_net = nn.Sequential(nn.Conv2d(3, 128, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(128, 128, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(128, 256, 3, 1, 1),
                                nn.ELU(),
                                nn.AdaptiveMaxPool2d((1, 1)),
                                AsMatrix(),
                                nn.Linear(256, 10),
                                nn.Softmax())
        return toy_net
Project: vnet.pytorch    Author: mattmacy
def ELUCons(elu, nchan):
    if elu:
        return nn.ELU(inplace=True)
    else:
        return nn.PReLU(nchan)

# normalization between sub-volumes is necessary
# for good performance
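
The comment above refers to the convolution blocks that ELUCons feeds into. A minimal sketch of such a block (hypothetical helper name, assuming 3D inputs and batch normalisation; this is not the project's actual layer code):

import torch.nn as nn

def make_conv_elu_block(nchan, elu=True):
    # hypothetical sketch: 5x5x5 convolution, normalisation across sub-volumes,
    # then the activation selected by ELUCons (ELU or PReLU)
    return nn.Sequential(
        nn.Conv3d(nchan, nchan, kernel_size=5, padding=2),
        nn.BatchNorm3d(nchan),
        ELUCons(elu, nchan),
    )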
Project: malmo-challenge    Author: Kaixhin
def __init__(self, hidden_size):
    super(ActorCritic, self).__init__()
    self.state_size = STATE_SIZE[0] * STATE_SIZE[1] * STATE_SIZE[2]

    self.elu = nn.ELU(inplace=True)
    self.softmax = nn.Softmax()
    self.sigmoid = nn.Sigmoid()

    # Pass state into model body
    self.conv1 = nn.Conv2d(STATE_SIZE[0], 32, 4, stride=2)
    self.conv2 = nn.Conv2d(32, 32, 3)
    self.fc1 = nn.Linear(1152, hidden_size)
    # Pass previous action, reward and timestep directly into LSTM
    self.lstm = nn.LSTMCell(hidden_size + ACTION_SIZE + 2, hidden_size)
    self.fc_actor1 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_critic1 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_actor2 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_critic2 = nn.Linear(hidden_size, ACTION_SIZE)
    self.fc_class = nn.Linear(hidden_size, 1)

    # Orthogonal weight initialisation
    for name, p in self.named_parameters():
      if 'weight' in name:
        init.orthogonal(p)
      elif 'bias' in name:
        init.constant(p, 0)
    # Set LSTM forget gate bias to 1
    for name, p in self.lstm.named_parameters():
      if 'bias' in name:
        n = p.size(0)
        forget_start_idx, forget_end_idx = n // 4, n // 2
        init.constant(p[forget_start_idx:forget_end_idx], 1)
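
The slice p[forget_start_idx:forget_end_idx] targets the forget gate because PyTorch packs LSTM biases gate by gate in the order input, forget, cell, output. A stand-alone sketch of the same trick using the current torch.nn.init-style in-place update (an illustration, not part of the project above):

import torch
import torch.nn as nn

lstm = nn.LSTMCell(16, 32)
for name, p in lstm.named_parameters():
    if 'bias' in name:
        n = p.size(0)                     # 4 * hidden_size, gate order [i, f, g, o]
        with torch.no_grad():
            p[n // 4:n // 2].fill_(1.0)   # set forget-gate bias to 1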
Project: covfefe    Author: deepnn
def elu(alpha=1.0, inplace=False):
    return nn.ELU(alpha=alpha, inplace=inplace)
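
For example, a hypothetical call (not taken from the project):

act = elu(alpha=0.5, inplace=True)   # equivalent to nn.ELU(alpha=0.5, inplace=True)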
Project: pytorch    Author: ezyang
def test_inplace_thnn(self):
        modules = [nn.ReLU, nn.ELU, nn.SELU, nn.RReLU]
        for mod in modules:
            r = mod(inplace=True)
            input = Variable(torch.randn(5, 5), requires_grad=True)
            output = r(input + 0)
            grad_output = torch.randn(5, 5)
            grad_output_clone = grad_output.clone()
            output.backward(grad_output)
            self.assertEqual(grad_output, grad_output_clone)
Project: nn-transfer    Author: gzuidhof
def __init__(self):
        super(ELUNet, self).__init__()
        self.elu = nn.ELU()
Project: nn-transfer    Author: gzuidhof
def test_elu(self):
        keras_model = Sequential()
        keras_model.add(ELU(input_shape=(3, 32, 32), name='elu'))
        keras_model.compile(loss=keras.losses.categorical_crossentropy,
                            optimizer=keras.optimizers.SGD())

        pytorch_model = ELUNet()

        self.transfer(keras_model, pytorch_model)
        self.assertEqualPrediction(keras_model, pytorch_model, self.test_data)

    # Tests activation function with learned parameters
Project: pytorch    Author: pytorch
def test_inplace_thnn(self):
        modules = [nn.ReLU, nn.ELU, nn.SELU, nn.RReLU]
        for mod in modules:
            r = mod(inplace=True)
            input = Variable(torch.randn(5, 5), requires_grad=True)
            output = r(input + 0)
            grad_output = torch.randn(5, 5)
            grad_output_clone = grad_output.clone()
            output.backward(grad_output)
            self.assertEqual(grad_output, grad_output_clone)
Project: fgan_info_geometric    Author: qulizhen
def __init__(self, isize, nz, nc, ngf, ngpu, hidden_activation, mu, last_layer='sigmoid'):
        super(GAN_G, self).__init__()
        self.ngpu = ngpu
        if hidden_activation == 'elu':
            first_activation = nn.ELU(mu)
            second_activation = nn.ELU(mu)
        elif hidden_activation == 'murelu':
            first_activation = NormalizedMatsushita(mu)
            second_activation = NormalizedMatsushita(mu)
        elif hidden_activation == 'ls':
            first_activation = LeastSquare()
            second_activation = LeastSquare()
        elif hidden_activation == 'sp':
            first_activation = NSoftPlus()
            second_activation = NSoftPlus()
        else:
            first_activation = nn.ReLU(False)
            second_activation = nn.ReLU(False)

        main = nn.Sequential(
            # Z goes into a linear of size: ngf
            nn.Linear(nz, ngf),
            nn.BatchNorm1d(ngf),
            first_activation,
            nn.Linear(ngf, ngf),
            nn.BatchNorm1d(ngf),
            second_activation,
            nn.Linear(ngf, nc * isize * isize)
        )
        if last_layer == 'sigmoid':
            main.add_module('top_sigmoid', torch.nn.Sigmoid())
        elif last_layer == 'tanh':
            main.add_module('top_tanh',torch.nn.Tanh())

        self.main = main
        self.nc = nc
        self.isize = isize
        self.nz = nz
Project: fgan_info_geometric    Author: qulizhen
def __init__(self, isize, nz, nc, ndf, ngpu,hidden_activation = 'relu', last_layer='', alpha=1.0):
        super(GAN_D, self).__init__()
        self.ngpu = ngpu

        if hidden_activation == 'elu':
            first_activation = nn.ELU(alpha=alpha)
            second_activation = nn.ELU(alpha=alpha)
        else:
            first_activation = nn.ReLU(False)
            second_activation = nn.ReLU(False)

        main = nn.Sequential(
            # Z goes into a linear of size: ndf
            nn.Linear(nc * isize * isize, ndf),
            first_activation,
            nn.Linear(ndf, ndf),
            second_activation,
            nn.Linear(ndf, 1)
        )
        if last_layer == 'sigmoid':
            main.add_module('top_sigmoid', torch.nn.Sigmoid())
        elif last_layer == 'tanh':
            main.add_module('top_tanh',torch.nn.Tanh())
        elif last_layer == 'matsu':
            main.add_module('final.{0}.Matsushita'.format(nc),
                            MatsushitaLinkFunc())

        self.main = main
        self.nc = nc
        self.isize = isize
        self.nz = nz
Project: CBEGAN    Author: taey16
def conv_block(in_dim, out_dim):
  return nn.Sequential(nn.Conv2d(in_dim,in_dim,kernel_size=3,stride=1,padding=1),
                       nn.ELU(True),
                       nn.Conv2d(in_dim,in_dim,kernel_size=3,stride=1,padding=1),
                       nn.ELU(True),
                       nn.Conv2d(in_dim,out_dim,kernel_size=1,stride=1,padding=0),
                       nn.AvgPool2d(kernel_size=2,stride=2))
Project: CBEGAN    Author: taey16
def deconv_block(in_dim, out_dim):
  return nn.Sequential(nn.Conv2d(in_dim,out_dim,kernel_size=3,stride=1,padding=1),
                       nn.ELU(True),
                       nn.Conv2d(out_dim,out_dim,kernel_size=3,stride=1,padding=1),
                       nn.ELU(True),
                       nn.UpsamplingNearest2d(scale_factor=2))
Project: fgan_info_geometric    Author: qulizhen
def __init__(self, isize, nz, nc, ngf, ngpu, hidden_activation='', mu=0.5, last_layer='none'):
        super(MLP_G, self).__init__()
        self.ngpu = ngpu
        if hidden_activation == 'matsu':
            first_activation = MatsushitaTransform()
            second_activation = MatsushitaTransform()
            third_activation = MatsushitaTransform()
        elif hidden_activation == 'matsu1':
            first_activation = MatsushitaTransformOne()
            second_activation = MatsushitaTransformOne()
            third_activation = MatsushitaTransformOne()
        elif hidden_activation == 'elu':
            first_activation = nn.ELU(alpha=mu)
            second_activation = nn.ELU(alpha=mu)
            third_activation = nn.ELU(alpha=mu)
        elif hidden_activation == 'murelu':
            first_activation = NormalizedMatsushita(mu)
            second_activation = NormalizedMatsushita(mu)
            third_activation = NormalizedMatsushita(mu)
        else:
            first_activation = nn.ReLU(False)
            second_activation = nn.ReLU(False)
            third_activation = nn.ReLU(False)
        main = nn.Sequential(
            # Z goes into a linear of size: ngf
            nn.Linear(nz, ngf),
            first_activation,
            nn.Linear(ngf, ngf),
            second_activation,
            nn.Linear(ngf, ngf),
            third_activation,
            nn.Linear(ngf, nc * isize * isize),
        )

        if last_layer == 'tanh':
            main.add_module('final.{0}.tanh'.format(nc),
                        nn.Tanh())
        elif last_layer == 'sigmoid':
            main.add_module('final.{0}.sigmoid'.format(nc),
                        nn.Sigmoid())

        self.main = main
        self.nc = nc
        self.isize = isize
        self.nz = nz