Python torch.nn module: UpsamplingNearest2d() example source code

We have extracted the following 14 code examples from open-source Python projects to illustrate how to use torch.nn.UpsamplingNearest2d().

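For reference, nn.UpsamplingNearest2d(scale_factor=2) repeats each pixel so that the spatial dimensions are doubled; newer PyTorch releases recommend nn.Upsample or F.interpolate with mode='nearest' instead, though the class still works. A minimal standalone sketch, not taken from any of the projects below:

import torch
import torch.nn as nn

up = nn.UpsamplingNearest2d(scale_factor=2)
x = torch.randn(1, 3, 16, 16)
y = up(x)                      # nearest-neighbour upsampling: H and W are doubled
print(y.shape)                 # torch.Size([1, 3, 32, 32])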
Project: kaggle-dstl    Author: lopuhin    | project source | file source
def __init__(self, hps: HyperParams):
        super().__init__(hps)
        self.pool = nn.MaxPool2d(2, 2)
        self.pool_top = nn.MaxPool2d(hps.top_scale, hps.top_scale)
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
        self.upsample_top = nn.UpsamplingNearest2d(scale_factor=hps.top_scale)
        filter_sizes = [hps.filters_base * s for s in self.filter_factors]
        self.down, self.up = [], []
        for i, nf in enumerate(filter_sizes):
            low_nf = hps.n_channels if i == 0 else filter_sizes[i - 1]
            self.down.append(self.module(hps, low_nf, nf))
            setattr(self, 'down_{}'.format(i), self.down[-1])
            if i != 0:
                self.up.append(self.module(hps, low_nf + nf, low_nf))
                setattr(self, 'conv_up_{}'.format(i), self.up[-1])
        self.conv_final = nn.Conv2d(filter_sizes[0], hps.n_classes, 1)
Project: kaggle-dstl    Author: lopuhin    | project source | file source
def __init__(self, hps):
        super().__init__(hps)
        b = hps.filters_base
        self.filters = [b * 2, b * 2, b * 4, b * 8, b * 16]
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
        self.down, self.down_pool, self.mid, self.up = [[] for _ in range(4)]
        for i, nf in enumerate(self.filters):
            low_nf = hps.n_channels if i == 0 else self.filters[i - 1]
            self.down_pool.append(
                nn.Conv2d(low_nf, low_nf, 3, padding=1, stride=2))
            setattr(self, 'down_pool_{}'.format(i), self.down_pool[-1])
            self.down.append(UNet2Module(hps, low_nf, nf))
            setattr(self, 'down_{}'.format(i), self.down[-1])
            if i != 0:
                self.mid.append(Conv3BN(hps, low_nf, low_nf))
                setattr(self, 'mid_{}'.format(i), self.mid[-1])
                self.up.append(UNet2Module(hps, low_nf + nf, low_nf))
                setattr(self, 'up_{}'.format(i), self.up[-1])
        self.conv_final = nn.Conv2d(self.filters[0], hps.n_classes, 1)
Project: kaggle-dstl    Author: lopuhin    | project source | file source
def __init__(self, hps):
        super().__init__(hps)
        s = hps.filters_base
        self.pool = nn.MaxPool2d(2, 2)
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
        self.input_conv = BasicConv2d(hps.n_channels, s, 1)
        self.enc_1 = BasicConv2d(s * 1, s * 2, 3, padding=1)
        self.enc_2 = BasicConv2d(s * 2, s * 4, 3, padding=1)
        self.enc_3 = BasicConv2d(s * 4, s * 8, 3, padding=1)
        self.enc_4 = BasicConv2d(s * 8, s * 8, 3, padding=1)
        # https://github.com/pradyu1993/segnet - decoder lacks relu (???)
        self.dec_4 = BasicConv2d(s * 8, s * 8, 3, padding=1)
        self.dec_3 = BasicConv2d(s * 8, s * 4, 3, padding=1)
        self.dec_2 = BasicConv2d(s * 4, s * 2, 3, padding=1)
        self.dec_1 = BasicConv2d(s * 2, s * 1, 3, padding=1)
        self.conv_final = nn.Conv2d(s, hps.n_classes, 1)
Project: self-driving-truck    Author: aleju    | project source | file source
def forward(self, embedding):
        def act(x):
            return F.relu(x, inplace=True)
        def up(x):
            m = nn.UpsamplingNearest2d(scale_factor=2)
            return m(x)
        x_ae = embedding # Bx256
        x_ae = act(self.ae_fc1_bn(self.ae_fc1(x_ae))) # 128x3x5
        x_ae = x_ae.view(-1, 128, 3, 5)
        x_ae = up(x_ae) # 6x10
        x_ae = act(self.ae_c1_bn(self.ae_c1(x_ae))) # 6x10
        x_ae = up(x_ae) # 12x20
        x_ae = act(self.ae_c2_bn(self.ae_c2(x_ae))) # 12x20 -> 10x20
        x_ae = F.pad(x_ae, (0, 0, 1, 0)) # 11x20
        x_ae = up(x_ae) # 22x40
        x_ae = act(self.ae_c3_bn(self.ae_c3(x_ae))) # 22x40
        x_ae = up(x_ae) # 44x80
        x_ae = F.pad(x_ae, (0, 0, 1, 0)) # add 1px at top (from 44 to 45)
        x_ae = F.sigmoid(self.ae_c4(x_ae))
        return x_ae
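Because up() constructs a fresh nn.UpsamplingNearest2d module on every call, the same result can be obtained with the stateless functional API; a minimal sketch under that assumption, not part of the original project:

import torch
import torch.nn.functional as F

x = torch.randn(1, 128, 3, 5)
y = F.interpolate(x, scale_factor=2, mode='nearest')   # equivalent to nn.UpsamplingNearest2d(scale_factor=2)(x)
print(y.shape)                                          # torch.Size([1, 128, 6, 10])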
Project: colorNet-pytorch    Author: shufanwu    | project source | file source
def __init__(self):
        super(ColorizationNet, self).__init__()
        self.fc1 = nn.Linear(512, 256)
        self.bn1 = nn.BatchNorm1d(256)
        self.conv1 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.conv2 = nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1)
        self.bn3 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.conv4 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.conv5 = nn.Conv2d(32, 2, kernel_size=3, stride=1, padding=1)
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
Project: carvana-challenge    Author: chplushsieh    | project source | file source
def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(self.n_channels, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv4 = nn.Conv2d(64, 64, 3, padding=1)
        self.conv5 = nn.Conv2d(64, 32, 3, padding=1)
        self.conv6 = nn.Conv2d(64, 32, 3, padding=1)
        self.conv7 = nn.Conv2d(32, self.n_classes, 3, padding=1)
Project: SuperResolution    Author: bguisard    | project source | file source
def __init__(self, num, use_cuda=False):
        super(UpsampleBlock, self).__init__()
        if use_cuda:
            self.up1 = nn.UpsamplingNearest2d(scale_factor=2).cuda(device_id=0)
            self.c2 = nn.Conv2d(num, num, kernel_size=3, stride=1, padding=0).cuda(device_id=0)
            self.b3 = nn.BatchNorm2d(num).cuda(device_id=0)
        else:
            self.up1 = nn.UpsamplingNearest2d(scale_factor=2)
            self.c2 = nn.Conv2d(num, num, kernel_size=3, stride=1, padding=0)
            self.b3 = nn.BatchNorm2d(num)
Project: kaggle-dstl    Author: lopuhin    | project source | file source
def __init__(self, hps):
        super().__init__(hps)
        self.conv1 = nn.Conv2d(hps.n_channels, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv4 = nn.Conv2d(64, 64, 3, padding=1)
        self.conv5 = nn.Conv2d(64, 32, 3, padding=1)
        self.conv6 = nn.Conv2d(64, 32, 3, padding=1)
        self.conv7 = nn.Conv2d(32, hps.n_classes, 3, padding=1)
Project: kaggle-dstl    Author: lopuhin    | project source | file source
def __init__(self, in_, out, scale):
        super().__init__()
        self.up_conv = nn.Conv2d(in_, out, 1)
        self.upsample = nn.UpsamplingNearest2d(scale_factor=scale)
Project: CBEGAN    Author: taey16    | project source | file source
def deconv_block(in_dim, out_dim):
  return nn.Sequential(nn.Conv2d(in_dim,out_dim,kernel_size=3,stride=1,padding=1),
                       nn.ELU(True),
                       nn.Conv2d(out_dim,out_dim,kernel_size=3,stride=1,padding=1),
                       nn.ELU(True),
                       nn.UpsamplingNearest2d(scale_factor=2))
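A quick shape check for the block above; this is only an illustrative sketch and assumes the deconv_block definition is in scope:

import torch

block = deconv_block(64, 32)          # two 3x3 convs (64 -> 32 channels) followed by 2x nearest upsampling
x = torch.randn(1, 64, 8, 8)
y = block(x)
print(y.shape)                        # torch.Size([1, 32, 16, 16])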
Project: StackGAN_pytorch    Author: qizhex    | project source | file source
def __init__(self, imsize, z_size, c_size, c_var_dim):
        super(lr_generator, self).__init__()
        self.lr_context_encoder = context_encoder_g(c_var_dim)
        self.s = imsize
        self.s2, self.s4, self.s8, self.s16 = \
            int(self.s / 2), int(self.s / 4), int(self.s / 8), int(self.s / 16)
        self.gf_dim = cfg.GAN.GF_DIM

        self.node1_0 = nn.Sequential(
            nn.Linear(z_size + c_size, self.s16 * self.s16 * self.gf_dim * 8),
            nn.BatchNorm1d(self.s16 * self.s16 * self.gf_dim * 8)
        )
        self.node1_1 = nn.Sequential(
            custom_con2d((self.s16, self.s16), self.gf_dim * 8, self.gf_dim * 2, (1, 1), (1, 1)),
            nn.BatchNorm2d(self.gf_dim * 2),
            nn.ReLU(),
            custom_con2d((self.s16, self.s16), self.gf_dim * 2, self.gf_dim * 2, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim * 2),
            nn.ReLU(),
            custom_con2d((self.s16, self.s16), self.gf_dim * 2, self.gf_dim * 8, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim * 8)
        )
        self.node2_0 = nn.Sequential(
            nn.UpsamplingNearest2d((self.s8, self.s8)),
            custom_con2d((self.s8, self.s8), self.gf_dim * 8, self.gf_dim * 4, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim * 4)
        )
        self.node2_1 = nn.Sequential(
            custom_con2d((self.s8, self.s8), self.gf_dim * 4, self.gf_dim * 1, (1, 1), (1, 1)),
            nn.BatchNorm2d(self.gf_dim),
            nn.ReLU(),
            custom_con2d((self.s8, self.s8), self.gf_dim * 1, self.gf_dim * 1, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim),
            nn.ReLU(),
            custom_con2d((self.s8, self.s8), self.gf_dim, self.gf_dim * 4, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim * 4),
        )
        self.node3 = nn.Sequential(
            nn.UpsamplingNearest2d((self.s4, self.s4)),
            custom_con2d((self.s4, self.s4), self.gf_dim * 4, self.gf_dim * 2, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim * 2),
            nn.ReLU(),
            nn.UpsamplingNearest2d((self.s2, self.s2)),
            custom_con2d((self.s2, self.s2), self.gf_dim * 2, self.gf_dim, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim),
            nn.ReLU(),
            nn.UpsamplingNearest2d((self.s, self.s)),
            custom_con2d((self.s, self.s), self.gf_dim, 3, (3, 3), (1, 1)),
            nn.Tanh(),
        )
        self.activ = nn.ReLU()
        self.z_dim = z_size
Project: StackGAN_pytorch    Author: qizhex    | project source | file source
def __init__(self, imsize, c_var_dim):
        super(hr_generator, self).__init__()
        self.hr_context_encoder = context_encoder_g(c_var_dim)
        self.s = imsize
        self.s2, self.s4, self.s8, self.s16 = \
            int(self.s / 2), int(self.s / 4), int(self.s / 8), int(self.s / 16)
        self.gf_dim = cfg.GAN.GF_DIM
        self.ef_dim = cfg.GAN.EMBEDDING_DIM
        self.encode_image = nn.Sequential(
            custom_con2d((self.s, self.s), 3, self.gf_dim, (3, 3), (1, 1)),
            nn.ReLU(),
            custom_con2d((self.s2, self.s2), self.gf_dim, self.gf_dim * 2, (4, 4)),
            nn.BatchNorm2d(self.gf_dim * 2),
            nn.ReLU(),
            custom_con2d((self.s4, self.s4), self.gf_dim * 2, self.gf_dim * 4, (4, 4)),
            nn.BatchNorm2d(self.gf_dim * 4),
            nn.ReLU(),
        )
        self.node0 = nn.Sequential(
            custom_con2d((self.s4, self.s4), self.ef_dim + self.gf_dim * 4, self.gf_dim * 4, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim * 4),
            nn.ReLU(),
        )
        res_list = []
        for i in range(4):
            res_list += [residual_block(imsize)]
        self.node1 = nn.Sequential(*res_list)
        self.node2 = nn.Sequential(
            nn.UpsamplingNearest2d((self.s2, self.s2)),
            custom_con2d((self.s2, self.s2), self.gf_dim * 4, self.gf_dim * 2, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim * 2),
            nn.ReLU(),
            nn.UpsamplingNearest2d((self.s, self.s)),
            custom_con2d((self.s, self.s), self.gf_dim * 2, self.gf_dim, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim),
            nn.ReLU(),
            nn.UpsamplingNearest2d((self.s * 2, self.s * 2)),
            custom_con2d((self.s * 2, self.s * 2), self.gf_dim, self.gf_dim // 2, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim // 2),
            nn.ReLU(),
            nn.UpsamplingNearest2d((self.s * 4, self.s * 4)),
            custom_con2d((self.s * 4, self.s * 4), self.gf_dim // 2, self.gf_dim // 4, (3, 3), (1, 1)),
            nn.BatchNorm2d(self.gf_dim // 4),
            nn.ReLU(),
            custom_con2d((self.s * 4, self.s * 4), self.gf_dim // 4, 3, (3, 3), (1, 1)),
            nn.Tanh(),
        )
Project: iffse    Author: kendricktan    | project source | file source
def __init__(self, useCuda, gpuDevice=0):
        super(netOpenFace, self).__init__()

        self.gpuDevice = gpuDevice

        self.layer1 = Conv2d(3, 64, (7, 7), (2, 2), (3, 3))
        self.layer2 = BatchNorm(64)
        self.layer3 = nn.ReLU()
        self.layer4 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=(1, 1))
        self.layer5 = CrossMapLRN(5, 0.0001, 0.75, gpuDevice=gpuDevice)
        self.layer6 = Conv2d(64, 64, (1, 1), (1, 1), (0, 0))
        self.layer7 = BatchNorm(64)
        self.layer8 = nn.ReLU()
        self.layer9 = Conv2d(64, 192, (3, 3), (1, 1), (1, 1))
        self.layer10 = BatchNorm(192)
        self.layer11 = nn.ReLU()
        self.layer12 = CrossMapLRN(5, 0.0001, 0.75, gpuDevice=gpuDevice)
        self.layer13 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=(1, 1))
        self.layer14 = Inception(192, (3, 5), (1, 1), (128, 32), (96, 16, 32, 64), nn.MaxPool2d(
            (3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer15 = Inception(256, (3, 5), (1, 1), (128, 64), (96, 32, 64, 64), nn.LPPool2d(
            2, (3, 3), stride=(3, 3)), True)
        self.layer16 = Inception(320, (3, 5), (2, 2), (256, 64), (128, 32, None, None), nn.MaxPool2d(
            (3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer17 = Inception(640, (3, 5), (1, 1), (192, 64), (96, 32, 128, 256), nn.LPPool2d(
            2, (3, 3), stride=(3, 3)), True)
        self.layer18 = Inception(640, (3, 5), (2, 2), (256, 128), (160, 64, None, None), nn.MaxPool2d(
            (3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer19 = Inception(1024, (3,), (1,), (384,), (96, 96, 256), nn.LPPool2d(
            2, (3, 3), stride=(3, 3)), True)
        self.layer21 = Inception(736, (3,), (1,), (384,), (96, 96, 256), nn.MaxPool2d(
            (3, 3), stride=(2, 2), padding=(0, 0)), True)
        self.layer22 = nn.AvgPool2d((3, 3), stride=(1, 1), padding=(0, 0))
        self.layer25 = Linear(736, 128)

        #
        self.resize1 = nn.UpsamplingNearest2d(scale_factor=3)
        self.resize2 = nn.AvgPool2d(4)

        #
        # self.eval()

        if useCuda:
            self.cuda(gpuDevice)
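The resize1/resize2 pair above effectively rescales feature maps by a factor of 3/4 (nearest-neighbour upsampling by 3, then average pooling by 4); a shape-only sketch of that composition, independent of the rest of the network:

import torch
import torch.nn as nn

resize1 = nn.UpsamplingNearest2d(scale_factor=3)
resize2 = nn.AvgPool2d(4)

x = torch.randn(1, 3, 96, 96)
y = resize2(resize1(x))        # 96 -> 288 -> 72
print(y.shape)                 # torch.Size([1, 3, 72, 72])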
Project: FaderNetworks    Author: facebookresearch    | project source | file source
def build_layers(img_sz, img_fm, init_fm, max_fm, n_layers, n_attr, n_skip,
                 deconv_method, instance_norm, enc_dropout, dec_dropout):
    """
    Build auto-encoder layers.
    """
    assert init_fm <= max_fm
    assert n_skip <= n_layers - 1
    assert np.log2(img_sz).is_integer()
    assert n_layers <= int(np.log2(img_sz))
    assert type(instance_norm) is bool
    assert 0 <= enc_dropout < 1
    assert 0 <= dec_dropout < 1
    norm_fn = nn.InstanceNorm2d if instance_norm else nn.BatchNorm2d

    enc_layers = []
    dec_layers = []

    n_in = img_fm
    n_out = init_fm

    for i in range(n_layers):
        enc_layer = []
        dec_layer = []
        skip_connection = n_layers - (n_skip + 1) <= i < n_layers - 1
        n_dec_in = n_out + n_attr + (n_out if skip_connection else 0)
        n_dec_out = n_in

        # encoder layer
        enc_layer.append(nn.Conv2d(n_in, n_out, 4, 2, 1))
        if i > 0:
            enc_layer.append(norm_fn(n_out, affine=True))
        enc_layer.append(nn.LeakyReLU(0.2, inplace=True))
        if enc_dropout > 0:
            enc_layer.append(nn.Dropout(enc_dropout))

        # decoder layer
        if deconv_method == 'upsampling':
            dec_layer.append(nn.UpsamplingNearest2d(scale_factor=2))
            dec_layer.append(nn.Conv2d(n_dec_in, n_dec_out, 3, 1, 1))
        elif deconv_method == 'convtranspose':
            dec_layer.append(nn.ConvTranspose2d(n_dec_in, n_dec_out, 4, 2, 1, bias=False))
        else:
            assert deconv_method == 'pixelshuffle'
            dec_layer.append(nn.Conv2d(n_dec_in, n_dec_out * 4, 3, 1, 1))
            dec_layer.append(nn.PixelShuffle(2))
        if i > 0:
            dec_layer.append(norm_fn(n_dec_out, affine=True))
            if dec_dropout > 0 and i >= n_layers - 3:
                dec_layer.append(nn.Dropout(dec_dropout))
            dec_layer.append(nn.ReLU(inplace=True))
        else:
            dec_layer.append(nn.Tanh())

        # update
        n_in = n_out
        n_out = min(2 * n_out, max_fm)
        enc_layers.append(nn.Sequential(*enc_layer))
        dec_layers.insert(0, nn.Sequential(*dec_layer))

    return enc_layers, dec_layers
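A hypothetical call to build_layers with the 'upsampling' decoder method, in which each decoder level uses nn.UpsamplingNearest2d followed by a 3x3 convolution. The argument values are illustrative, and the sketch assumes build_layers plus its numpy/torch.nn imports are in scope:

import torch

enc_layers, dec_layers = build_layers(
    img_sz=256, img_fm=3, init_fm=32, max_fm=512, n_layers=6, n_attr=40,
    n_skip=0, deconv_method='upsampling', instance_norm=False,
    enc_dropout=0.0, dec_dropout=0.0)

x = torch.randn(2, 3, 256, 256)
for layer in enc_layers:       # each encoder layer halves the spatial resolution
    x = layer(x)
print(x.shape)                 # torch.Size([2, 512, 4, 4])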