Python torch.nn 模块,UpsamplingBilinear2d() 实例源码

我们从Python开源项目中,提取了以下7个代码示例,用于说明如何使用torch.nn.UpsamplingBilinear2d()。

项目:crnn    作者:wulivicte    | 项目源码 | 文件源码
def assureRatio(img):
    """Ensure the image is at least as wide as it is tall (imgH <= imgW).

    If h > w, the width is bilinearly upsampled to match the height,
    yielding a square (h, h) image; otherwise the input is returned
    untouched.

    Args:
        img: 4-D tensor of shape (batch, channels, height, width).

    Returns:
        Tensor whose height <= width (the same object when no resize
        was needed).
    """
    b, c, h, w = img.size()
    if h > w:
        # nn.UpsamplingBilinear2d is deprecated; interpolate() with
        # align_corners=True reproduces its output exactly.
        img = nn.functional.interpolate(
            img, size=(h, h), mode='bilinear', align_corners=True)
    return img
项目:pytorch-semseg    作者:meetshah1995    | 项目源码 | 文件源码
def __init__(self, in_size, out_size, is_deconv):
    """U-Net decoder block: an upsampling stage followed by a double conv.

    Args:
        in_size: number of input channels.
        out_size: number of output channels.
        is_deconv: use a learned transposed convolution when True,
            fixed bilinear upsampling otherwise.
    """
    super(unetUp, self).__init__()
    # Double-convolution applied after the upsampling step.
    self.conv = unetConv2(in_size, out_size, False)
    # Learned vs. fixed 2x upsampling, chosen by the flag.
    self.up = (
        nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
        if is_deconv
        else nn.UpsamplingBilinear2d(scale_factor=2)
    )
项目:sceneReco    作者:bear63    | 项目源码 | 文件源码
def assureRatio(img):
    """Ensure imgH <= imgW by widening tall images into a square.

    A tall image (height > width) is bilinearly upsampled to
    (height, height); any other image passes through unchanged.
    """
    _, _, height, width = img.size()
    if height <= width:
        return img
    upsampler = nn.UpsamplingBilinear2d(size=(height, height), scale_factor=None)
    return upsampler(img)
项目:crnn.pytorch    作者:meijieru    | 项目源码 | 文件源码
def assureRatio(img):
    """Guarantee that the image height never exceeds its width.

    When the input is taller than it is wide, it is resized with
    bilinear upsampling to a (h, h) square; otherwise it is returned
    as-is.
    """
    shape = img.size()
    h, w = shape[2], shape[3]
    if h > w:
        # Resize width up to the height, producing a square image.
        img = nn.UpsamplingBilinear2d(size=(h, h), scale_factor=None)(img)
    return img
项目:MachineLearning    作者:timomernick    | 项目源码 | 文件源码
def __init__(self, joint_index, mpii, batch_size):
        """Build a single-joint heatmap regression model.

        Args:
            joint_index: index of the joint this model is responsible for.
            mpii: dataset descriptor; this code reads heatmap_size,
                max_people, max_joints, image_num_components and
                image_size from it.
            batch_size: number of samples per batch, used to pre-allocate
                the input/label buffers.
        """
        super(JointModel, self).__init__()

        self.joint_index = joint_index

        # Geometry taken from the dataset descriptor.
        self.heatmap_size = mpii.heatmap_size
        self.max_people = mpii.max_people
        self.max_joints = mpii.max_joints

        # Base number of feature maps; doubled at each conv stage below.
        ndf = 64

        # Four strided convolutions with shrinking kernels (11/9/7/5).
        self.conv0 = nn.Conv2d(mpii.image_num_components, ndf, 11, stride=2)
        self.conv1 = nn.Conv2d(ndf, ndf * 2, 9, stride=2)
        self.conv2 = nn.Conv2d(ndf * 2, ndf * 4, 7, stride=2)
        self.conv3 = nn.Conv2d(ndf * 4, ndf * 8, 5, stride=2)                

        # 512 * 3 * 3: assumes the conv stack above flattens to 3x3 maps
        # of ndf*8 == 512 channels -- depends on mpii.image_size; confirm.
        self.fc0_size = 512 * 3 * 3
        self.fc0 = nn.Linear(self.fc0_size, self.heatmap_size * self.heatmap_size)

        self.relu = nn.ReLU(inplace=True)
        self.tanh = nn.Tanh()
        # NOTE(review): nn.Softmax() without an explicit `dim` is
        # deprecated and its implicit axis varies with torch version.
        self.softmax = nn.Softmax()

        self.loss = nn.BCELoss().cuda()

        # Pre-allocated CUDA buffers for images and target heatmaps.
        # NOTE(review): Variable is the legacy pre-0.4 autograd wrapper;
        # modern PyTorch uses plain tensors on the target device.
        self.images = Variable(torch.FloatTensor(batch_size, mpii.image_num_components, mpii.image_size, mpii.image_size)).cuda()
        self.labels = Variable(torch.FloatTensor(batch_size, self.heatmap_size, self.heatmap_size)).cuda()

        # Upsample predicted heatmaps 4x (bilinear) after the FC output.
        self.scale_heatmap = nn.UpsamplingBilinear2d(scale_factor=4)
项目:sceneReco    作者:yijiuzai    | 项目源码 | 文件源码
def assureRatio(img):
    """Make sure the image satisfies imgH <= imgW.

    Images that are taller than wide are stretched (bilinear) to an
    h x h square; everything else is returned unmodified.
    """
    _, _, h, w = img.size()
    if not h > w:
        return img
    resize = nn.UpsamplingBilinear2d(size=(h, h), scale_factor=None)
    return resize(img)
项目:depth-semantic-fully-conv    作者:iapatil    | 项目源码 | 文件源码
def __init__(self, block1, block2, batch_size):
        """Build the joint depth-estimation / semantic-segmentation model.

        The encoder is a ResNet-50-style stack (projection + skip blocks
        built from `block1` via make_proj_layer/make_skip_layer, defined
        elsewhere in this class); the decoder is a series of up-conv
        layers built from `block2`.

        Args:
            block1: residual block class used for the encoder stages.
            block2: up-convolution block class used for the decoder.
            batch_size: batch size, forwarded to make_up_conv_layer.
        """
        super(Model, self).__init__()
        self.batch_size=batch_size

        # Layers for Depth Estimation
        # Stem: 7x7 stride-2 conv -> BN -> ReLU -> 3x3 stride-2 max pool.
        # NOTE(review): padding=4 with kernel_size=7 is unusual (3 is the
        # ResNet convention) -- confirm intended output size.
        self.conv1 = nn.Conv2d(3, 64, kernel_size = 7, stride=2, padding=4)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.max_pool = nn.MaxPool2d(3,stride=2)

        # Stage 1: 3 bottleneck blocks, 64 -> 256 channels, stride 1.
        self.proj_layer1 = self.make_proj_layer(block1, 64 , d1 = 64, d2 = 256, stride = 1)
        self.skip_layer1_1 = self.make_skip_layer(block1, 256, d1 = 64, d2 = 256, stride=1)
        self.skip_layer1_2 = self.make_skip_layer(block1, 256, d1 = 64, d2 = 256, stride=1)

        # Stage 2: 4 blocks, 256 -> 512 channels, stride 2 on the first.
        self.proj_layer2 = self.make_proj_layer(block1, 256 , d1 = 128, d2 = 512, stride = 2)
        self.skip_layer2_1 = self.make_skip_layer(block1, 512, d1 = 128, d2 = 512)
        self.skip_layer2_2 = self.make_skip_layer(block1, 512, d1 = 128, d2 = 512)
        self.skip_layer2_3 = self.make_skip_layer(block1, 512, d1 = 128, d2 = 512)

        # Stage 3: 6 blocks, 512 -> 1024 channels, stride 2 on the first.
        self.proj_layer3 = self.make_proj_layer(block1, 512 , d1 = 256, d2 = 1024, stride=2)
        self.skip_layer3_1 = self.make_skip_layer(block1, 1024, d1 = 256, d2 = 1024)
        self.skip_layer3_2 = self.make_skip_layer(block1, 1024, d1 = 256, d2 = 1024)
        self.skip_layer3_3 = self.make_skip_layer(block1, 1024, d1 = 256, d2 = 1024)
        self.skip_layer3_4 = self.make_skip_layer(block1, 1024, d1 = 256, d2 = 1024)
        self.skip_layer3_5 = self.make_skip_layer(block1, 1024, d1 = 256, d2 = 1024)

        # Stage 4: 3 blocks, 1024 -> 2048 channels, stride 2 on the first.
        self.proj_layer4 = self.make_proj_layer(block1, 1024 , d1 = 512, d2 = 2048, stride=2)
        self.skip_layer4_1 = self.make_skip_layer(block1, 2048, d1 = 512, d2 = 2048)
        self.skip_layer4_2 = self.make_skip_layer(block1, 2048, d1 = 512, d2 = 2048)

        # 1x1 bottleneck compressing encoder output before the decoder.
        self.conv2 = nn.Conv2d(2048,1024,1)
        self.bn2 = nn.BatchNorm2d(1024)

        # Decoder: four up-conv stages halving the channel count each time.
        self.up_conv1 = self.make_up_conv_layer(block2, 1024, 512, self.batch_size)
        self.up_conv2 = self.make_up_conv_layer(block2, 512, 256, self.batch_size)
        self.up_conv3 = self.make_up_conv_layer(block2, 256, 128, self.batch_size)
        self.up_conv4 = self.make_up_conv_layer(block2, 128, 64, self.batch_size)

        # Final 3x3 conv producing the 1-channel depth map.
        self.conv3 = nn.Conv2d(64,1,3, padding=1)

        # Layers for Semantic Segmentation
        self.up_conv5 = self.make_up_conv_layer(block2,128 ,64 ,self.batch_size)
        # 64 -> 48 -> 38 channels; 38 presumably the number of classes.
        self.conv4 = nn.Conv2d(64,48,3,padding=1) 
        self.bn4 = nn.BatchNorm2d(48)
        self.conv5 = nn.Conv2d(48,38,3,padding=1) 
        self.bn5 = nn.BatchNorm2d(38)
        # NOTE(review): Dropout2d(p=1) zeroes EVERY channel in training
        # mode -- almost certainly a typo for a smaller rate; confirm.
        self.dropout = nn.Dropout2d(p=1)

        # Upsample outputs to the fixed 480x640 target resolution.
        self.upsample = nn.UpsamplingBilinear2d(size = (480,640))