Python torch.nn.functional module: upsample() code examples

We extracted the following 21 code examples from open-source Python projects to illustrate how to use torch.nn.functional.upsample().

Project: pytorch-semantic-segmentation    Author: ZijunDeng
def forward(self, x):
        x_size = x.size()
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        if self.training and self.use_aux:
            aux = self.aux_logits(x)
        x = self.layer4(x)
        x = self.ppm(x)
        x = self.final(x)
        if self.training and self.use_aux:
            return F.upsample(x, x_size[2:], mode='bilinear'), F.upsample(aux, x_size[2:], mode='bilinear')
        return F.upsample(x, x_size[2:], mode='bilinear')
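Below is a minimal sketch (not from the project, shapes invented for illustration) of how the size-based upsample at the end of a forward pass like the one above behaves: coarse class scores are resized back to the input's spatial dimensions. Note that on recent PyTorch versions F.upsample is deprecated in favor of F.interpolate.

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 224, 224)        # hypothetical input batch
logits = torch.randn(1, 21, 28, 28)    # hypothetical 1/8-resolution class scores
out = F.upsample(logits, size=x.size()[2:], mode='bilinear')
print(out.size())                      # torch.Size([1, 21, 224, 224])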


# just a try, not recommended to use
Project: pytorch-retinanet    Author: kuangliu
def _upsample_add(self, x, y):
        '''Upsample and add two feature maps.

        Args:
          x: (Variable) top feature map to be upsampled.
          y: (Variable) lateral feature map.

        Returns:
          (Variable) added feature map.

        Note that in PyTorch, when the input size is odd, the upsampled
        feature map produced by `F.upsample(..., scale_factor=2, mode='nearest')`
        may not match the lateral feature map size.

        e.g.
        original input size: [N,_,15,15] ->
        conv2d feature map size: [N,_,8,8] ->
        upsampled feature map size: [N,_,16,16]

        So we choose bilinear upsampling, which supports arbitrary output sizes.
        '''
        _,_,H,W = y.size()
        return F.upsample(x, size=(H,W), mode='bilinear') + y
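A small sketch (assumed shapes, not from the project) illustrating the mismatch described in the docstring and the size-based bilinear fix:

import torch
import torch.nn.functional as F

lateral = torch.randn(1, 256, 15, 15)   # hypothetical lateral feature map with odd spatial size
top     = torch.randn(1, 256, 8, 8)     # hypothetical top feature map after a stride-2 stage

up_nearest = F.upsample(top, scale_factor=2, mode='nearest')
print(up_nearest.size())                # torch.Size([1, 256, 16, 16]) -- does not match 15x15

_, _, H, W = lateral.size()
up_bilinear = F.upsample(top, size=(H, W), mode='bilinear')
print((up_bilinear + lateral).size())   # torch.Size([1, 256, 15, 15])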
Project: dlcv_for_beginners    Author: frombeijingwithlove
def forward(self, x):
        # conv & downsampling
        down_sampled_fmaps = []
        for i in range(self.n_stages-1):
            x = self.down_convs[i](x)
            x = self.max_pooling(x)
            down_sampled_fmaps.insert(0, x)

        # center convs
        x = self.down_convs[self.n_stages-1](x)
        x = self.up_convs[0](x)

        # conv & upsampling
        for i, down_sampled_fmap in enumerate(down_sampled_fmaps):
            x = torch.cat([x, down_sampled_fmap], 1)
            x = self.up_convs[i+1](x)
            x = F.upsample(x, scale_factor=2, mode='bilinear')

        return self.out_conv(x)
        #x = self.out_conv(x)
        #return x if self.out_conv.out_channels == 1 else F.relu(x)
Project: pytorch-semseg    Author: meetshah1995
def forward(self, y, z):
        x = torch.cat([y, nn.MaxPool2d(self.scale, self.scale)(z)], dim=1)
        y_prime = self.conv1(x)
        y_prime = self.conv2(y_prime)

        x = self.conv_res(y_prime)
        upsample_size = torch.Size([_s*self.scale for _s in y_prime.shape[-2:]])
        x = F.upsample(x, size=upsample_size, mode='nearest')
        z_prime = z + x

        return y_prime, z_prime
Project: RetinaNet    Author: c0nn3r
def _upsample(self, original_feature, scaled_feature, scale_factor=2):
        # is this correct? You do lose information on the upscale...
        height, width = scaled_feature.size()[2:]
        return F.upsample(original_feature, scale_factor=scale_factor)[:, :, :height, :width]
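A hedged sketch (invented shapes) of the upsample-then-crop idea used above: the feature map is doubled with the default nearest mode and then sliced down to the target height and width:

import torch
import torch.nn.functional as F

original = torch.randn(1, 256, 8, 8)    # hypothetical coarse feature map
scaled   = torch.randn(1, 256, 15, 15)  # hypothetical feature map at the target size

height, width = scaled.size()[2:]
up = F.upsample(original, scale_factor=2)        # nearest by default -> 16x16
cropped = up[:, :, :height, :width]              # crop down to 15x15
print(cropped.size())                            # torch.Size([1, 256, 15, 15])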
Project: kaggle-carvana    Author: ematvey
def forward(self, inputs):
        d0 = self.down0(inputs)

        d1 = self.down1(d0)
        d2 = self.down2(F.max_pool2d(d1, kernel_size=2, stride=2))
        d3 = self.down3(F.max_pool2d(d2, kernel_size=2, stride=2))
        d4 = self.down4(F.max_pool2d(d3, kernel_size=2, stride=2))
        d5 = self.down5(F.max_pool2d(d4, kernel_size=2, stride=2))
        d6 = self.down6(F.max_pool2d(d5, kernel_size=2, stride=2))

        out = self.center(F.max_pool2d(d6, kernel_size=2, stride=2))

        out = self.up6(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d6], dim=1))
        out = self.up5(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d5], dim=1))
        out = self.up4(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d4], dim=1))
        out = self.up3(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d3], dim=1))
        out = self.up2(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d2], dim=1))
        out = self.up1(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d1], dim=1))

        out = self.f1(torch.cat([out, d0], dim=1))
        out = self.f2(torch.cat([out, inputs], dim=1))
        out = self.out(out)
        out = out.squeeze(1)  # remove logits dim
        return out
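The decoder above repeats one pattern: upsample by a factor of 2 and concatenate with the encoder feature map of matching resolution along the channel dimension. A minimal sketch with invented shapes:

import torch
import torch.nn.functional as F

out = torch.randn(1, 512, 8, 8)     # hypothetical decoder feature map
d6  = torch.randn(1, 512, 16, 16)   # hypothetical encoder feature map at twice the resolution

up = F.upsample(out, scale_factor=2, mode='bilinear')   # 8x8 -> 16x16
merged = torch.cat([up, d6], dim=1)                     # concatenate along channels
print(merged.size())                                    # torch.Size([1, 1024, 16, 16])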
Project: pytorch-semantic-segmentation    Author: ZijunDeng
def forward(self, x):
        enc1 = self.enc1(x)
        enc2 = self.enc2(enc1)
        enc3 = self.enc3(enc2)
        enc4 = self.enc4(enc3)
        center = self.center(enc4)
        dec4 = self.dec4(torch.cat([center, F.upsample(enc4, center.size()[2:], mode='bilinear')], 1))
        dec3 = self.dec3(torch.cat([dec4, F.upsample(enc3, dec4.size()[2:], mode='bilinear')], 1))
        dec2 = self.dec2(torch.cat([dec3, F.upsample(enc2, dec3.size()[2:], mode='bilinear')], 1))
        dec1 = self.dec1(torch.cat([dec2, F.upsample(enc1, dec2.size()[2:], mode='bilinear')], 1))
        final = self.final(dec1)
        return F.upsample(final, x.size()[2:], mode='bilinear')
Project: pytorch-semantic-segmentation    Author: ZijunDeng
def forward(self, x):
        x_size = x.size()
        out = [x]
        for f in self.features:
            out.append(F.upsample(f(x), x_size[2:], mode='bilinear'))
        out = torch.cat(out, 1)
        return out
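This module upsamples every pyramid branch back to the input's spatial size so that all branches can be concatenated along the channel dimension. A simplified sketch (assumed bin sizes, with plain average pooling standing in for the project's branch layers):

import torch
import torch.nn.functional as F

x = torch.randn(1, 512, 60, 60)            # hypothetical backbone feature map
out = [x]
for bin_size in (1, 2, 3, 6):              # hypothetical pyramid bin sizes
    pooled = F.adaptive_avg_pool2d(x, bin_size)
    out.append(F.upsample(pooled, size=x.size()[2:], mode='bilinear'))
print(torch.cat(out, 1).size())            # torch.Size([1, 2560, 60, 60])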
Project: pytorch-semantic-segmentation    Author: ZijunDeng
def forward(self, x):
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        if self.training and self.use_aux:
            aux = self.aux_logits(x)
        x = self.layer4(x)
        x = self.ppm(x)
        x = self.final(x)
        if self.training and self.use_aux:
            return F.upsample(x, self.input_size, mode='bilinear'), F.upsample(aux, self.input_size, mode='bilinear')
        return F.upsample(x, self.input_size, mode='bilinear')
Project: pytorch    Author: ezyang
def test_upsamplingNearest1d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), (input,)))
Project: pytorch    Author: ezyang
def test_upsamplingLinear1d(self):
        m = nn.Upsample(size=4, mode='linear')
        in_t = torch.ones(1, 1, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.upsample(x, 4, mode='linear'), (input,)))
Project: pytorch    Author: ezyang
def test_upsamplingNearest2d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), (input,)))
Project: pytorch    Author: ezyang
def test_upsamplingBilinear2d(self):
        m = nn.Upsample(size=4, mode='bilinear')
        in_t = torch.ones(1, 1, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.upsample(x, 4, mode='bilinear'), (input,)))
Project: pytorch    Author: ezyang
def test_upsamplingNearest3d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2, 2), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), (input,)))
Project: pytorch    Author: pytorch
def test_upsamplingNearest1d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2), requires_grad=True)
        gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])
Project: pytorch    Author: pytorch
def test_upsamplingLinear1d(self):
        m = nn.Upsample(size=4, mode='linear')
        in_t = torch.ones(1, 1, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2), requires_grad=True)
        gradcheck(lambda x: F.upsample(x, 4, mode='linear'), (input,))
Project: pytorch    Author: pytorch
def test_upsamplingNearest2d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2), requires_grad=True)
        self.assertEqual(
            F.upsample(input, 4, mode='nearest'),
            F.upsample(input, scale_factor=2, mode='nearest'))
        gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])
        gradgradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])
Project: pytorch    Author: pytorch
def test_upsamplingBilinear2d(self):
        m = nn.Upsample(size=4, mode='bilinear')
        in_t = torch.ones(1, 1, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2), requires_grad=True)
        gradcheck(lambda x: F.upsample(x, 4, mode='bilinear'), [input])
Project: pytorch    Author: pytorch
def test_upsamplingNearest3d(self):
        m = nn.Upsample(size=4, mode='nearest')
        in_t = torch.ones(1, 1, 2, 2, 2)
        out_t = m(Variable(in_t))
        self.assertEqual(torch.ones(1, 1, 4, 4, 4), out_t.data)

        input = Variable(torch.randn(1, 1, 2, 2, 2), requires_grad=True)
        gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])
Project: pytorch-semseg    Author: meetshah1995
def forward(self, x):

        # pass to initial conv
        x = self.conv1(x)

        # pass through residual units
        for i in range(3):
            x = self.up_residual_units[i](x)

        # divide stream
        y = x
        z = self.split_conv(x)

        prev_channels = 48
        # encoding
        for n_blocks, channels, scale in self.encoder_frru_specs:
            # maxpool bigger feature map
            y_pooled = F.max_pool2d(y, stride=2, kernel_size=2, padding=0)
            # pass through encoding FRRUs
            for block in range(n_blocks):
                key = '_'.join(map(str,['encoding_frru', n_blocks, channels, scale, block]))
                y, z = getattr(self, key)(y_pooled, z)
            prev_channels = channels

        # decoding
        for n_blocks, channels, scale in self.decoder_frru_specs:
            # bilinear upsample smaller feature map
            upsample_size = torch.Size([_s*2 for _s in y.size()[-2:]]) 
            y_upsampled = F.upsample(y, size=upsample_size, mode='bilinear')
            # pass through decoding FRRUs
            for block in range(n_blocks):
                key = '_'.join(map(str,['decoding_frru', n_blocks, channels, scale, block]))
                #print "Incoming FRRU Size: ", key, y_upsampled.shape, z.shape
                y, z = getattr(self, key)(y_upsampled, z)
                #print "Outgoing FRRU Size: ", key, y.shape, z.shape
            prev_channels = channels

        # merge streams
        x = torch.cat([F.upsample(y, scale_factor=2, mode='bilinear' ), z], dim=1)
        x = self.merge_conv(x)

        # pass through residual units
        for i in range(3):
            x = self.down_residual_units[i](x)

        # final 1x1 conv to get classification
        x = self.classif_conv(x)

        return x
Project: PyTorch-Encoding    Author: zhanghang1989
def upsample(input, size=None, scale_factor=None, mode='nearest'):
    """Multi-GPU version torch.nn.functional.upsample

    Upsamples the input to either the given :attr:`size` or the given
    :attr:`scale_factor`

    The algorithm used for upsampling is determined by :attr:`mode`.

    Currently temporal, spatial and volumetric upsampling are supported, i.e.
    expected inputs are 3-D, 4-D or 5-D in shape.

    The input dimensions are interpreted in the form:
    `mini-batch x channels x [depth] x [height] x width`

    The modes available for upsampling are: `nearest`, `linear` (3D-only),
    `bilinear` (4D-only), `trilinear` (5D-only)

    Args:
        input (Variable): input
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
            output spatial size.
        scale_factor (int): multiplier for spatial size. Has to be an integer.
        mode (string): algorithm used for upsampling:
            'nearest' | 'linear' | 'bilinear' | 'trilinear'. Default: 'nearest'
    """
    if isinstance(input, Variable):
        return F.upsample(input, size=size, scale_factor=scale_factor,
                          mode=mode)
    elif isinstance(input, tuple) or isinstance(input, list):
        lock = threading.Lock()
        results = {}
        def _worker(i, x):
            try:
                with torch.cuda.device_of(x):
                    result = F.upsample(x, size=size,
                                        scale_factor=scale_factor, mode=mode)
                with lock:
                    results[i] = result
            except Exception as e:
                with lock:
                    results[i] = e
        # multi-threading across the different GPUs
        threads = [threading.Thread(target=_worker,
                                    args=(i, x),
                                    )
                   for i, (x) in enumerate(input)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join() 
        outputs = dict_to_list(results)
        return outputs
    else:
        raise RuntimeError('unknown input type')
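A hedged usage sketch of the wrapper above (not from the project): a single Variable goes straight to F.upsample, while a list of per-GPU Variables is processed in parallel threads. It assumes at least two CUDA devices and that the function above (and its dict_to_list helper) is importable; the import path is hypothetical.

import torch
from torch.autograd import Variable
# from encoding.parallel import upsample   # hypothetical import path for the function above

x0 = Variable(torch.randn(1, 3, 32, 32).cuda(0))
x1 = Variable(torch.randn(1, 3, 32, 32).cuda(1))

# single input: behaves exactly like F.upsample
out_single = upsample(x0, scale_factor=2, mode='nearest')

# list input: one thread per GPU tensor, results returned in input order
out_list = upsample([x0, x1], scale_factor=2, mode='nearest')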