Python torch.nn.functional module: max_pool2d() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.functional.max_pool2d().
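
Before the project excerpts, here is a minimal, self-contained sketch of the basic call signature. It is not taken from any of the projects below, and the tensor shapes are purely illustrative:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)                               # dummy NCHW input
y = F.max_pool2d(x, kernel_size=2, stride=2)              # 2x2 window, stride 2: halves H and W
print(y.shape)                                            # torch.Size([1, 3, 4, 4])
g = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)))   # kernel as large as the map: global max pooling
print(g.shape)                                            # torch.Size([1, 3, 1, 1])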

Project: torch_light    Author: ne7ermore    | project source | file source
def forward(self, x):
        n_idx = 0
        c_idx = 1
        h_idx = 2
        w_idx = 3

        x = self.lookup_table(x)
        x = x.unsqueeze(c_idx)

        enc_outs = []
        for encoder in self.encoders:
            enc_ = F.relu(encoder(x))
            k_h = enc_.size()[h_idx]
            enc_ = F.max_pool2d(enc_, kernel_size=(k_h, 1))
            enc_ = enc_.squeeze(w_idx)
            enc_ = enc_.squeeze(h_idx)
            enc_outs.append(enc_)

        encoding = self.dropout(torch.cat(enc_outs, 1))
        return F.log_softmax(self.logistic(encoding))
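
The snippet above uses max_pool2d for max-over-time pooling in a text CNN: the pooling kernel spans the full height (sequence) dimension while leaving the width untouched. A standalone sketch of that pattern, with hypothetical shapes not taken from the project:

import torch
import torch.nn.functional as F

enc = torch.randn(4, 100, 37, 1)                          # (batch, channels, seq_len, 1) after a conv encoder
pooled = F.max_pool2d(enc, kernel_size=(enc.size(2), 1))  # collapse the sequence dimension only
print(pooled.shape)                                       # torch.Size([4, 100, 1, 1])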
Project: FastNeuralStyle    Author: bengxy    | project source | file source
def forward(self, X):
        h = F.relu(self.conv1_1(X))
        h = F.relu(self.conv1_2(h))
        relu1_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        relu2_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        relu3_3 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        relu4_3 = h

        return [relu1_2,relu2_2,relu3_3,relu4_3]
Project: tiny-style-transfer    Author: ggsonic    | project source | file source
def vgg(inputs, model):
    '''VGG definition with style and content outputs.
    '''
    style, content = [], []

    def block(x, ids):
        for i in ids:
            x = F.relu(F.conv2d(x, Variable(model.features[i].weight.data.cuda()),Variable(model.features[i].bias.data.cuda()), 1, 1), inplace=True)
            if i in style_layers:
                style.append(gram(x))
            if i in content_layers:
                content.append(x)
        return F.max_pool2d(x, 2, 2)

    o = block(inputs, [0, 2])
    o = block(o, [5, 7])
    o = block(o, [10, 12, 14])
    o = block(o, [17, 19, 21])
    o = block(o, [24, 26, 28])
    return style, content
Project: fast-neural-style    Author: abhiskk    | project source | file source
def forward(self, X):
        h = F.relu(self.conv1_1(X))
        h = F.relu(self.conv1_2(h))
        relu1_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        relu2_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        relu3_3 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        relu4_3 = h

        return [relu1_2, relu2_2, relu3_3, relu4_3]
Project: optnet    Author: locuslab    | project source | file source
def forward(self, x):
        nBatch = x.size(0)

        x = F.max_pool2d(self.conv1(x), 2)
        x = F.max_pool2d(self.conv2(x), 2)
        x = x.view(nBatch, -1)

        L = self.M*self.L
        Q = L.mm(L.t()) + self.eps*Variable(torch.eye(self.nHidden)).cuda()
        Q = Q.unsqueeze(0).expand(nBatch, self.nHidden, self.nHidden)
        G = self.G.unsqueeze(0).expand(nBatch, self.nineq, self.nHidden)
        z0 = self.qp_z0(x)
        s0 = self.qp_s0(x)
        h = z0.mm(self.G.t())+s0
        e = Variable(torch.Tensor())
        inputs = self.qp_o(x)
        x = QPFunction()(Q, inputs, G, h, e, e)
        x = x[:,:10]

        return F.log_softmax(x)
Project: pytorch-fast-neural-style    Author: vishal1796    | project source | file source
def forward(self, x):
        relu1_1  = F.relu(self.conv1_1(x))
        relu1_2  = F.relu(self.conv1_2(relu1_1))
        maxpool_1  = F.max_pool2d(relu1_2, kernel_size=2, stride=2)
        relu2_1  = F.relu(self.conv2_1(maxpool_1))
        relu2_2  = F.relu(self.conv2_2(relu2_1))
        maxpool_2  = F.max_pool2d(relu2_2, kernel_size=2, stride=2)
        relu3_1  = F.relu(self.conv3_1(maxpool_2))
        relu3_2  = F.relu(self.conv3_2(relu3_1))
        relu3_3  = F.relu(self.conv3_3(relu3_2))
        maxpool_3  = F.max_pool2d(relu3_3, kernel_size=2, stride=2)
        relu4_1  = F.relu(self.conv4_1(maxpool_3))
        relu4_2  = F.relu(self.conv4_2(relu4_1))
        relu4_3  = F.relu(self.conv4_3(relu4_2))

        return [relu1_2,relu2_2,relu3_3,relu4_3]
Project: Deep-learning-with-cats    Author: AlexiaJM    | project source | file source
def forward(self, X):
        h = F.relu(self.conv1_1(X))
        h = F.relu(self.conv1_2(h))
        relu1_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        relu2_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        relu3_3 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        relu4_3 = h
        return [relu1_2, relu2_2, relu3_3, relu4_3]

## Weights init function
Project: DeepPoseComparison    Author: ynaka81    | project source | file source
def forward(self, x):
        # layer1
        h = F.relu(self.conv1(x))
        h = F.max_pool2d(h, 3, stride=2)
        # layer2
        h = F.relu(self.conv2(h))
        h = F.max_pool2d(h, 3, stride=2)
        # layer3-5
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.relu(self.conv5(h))
        h = F.max_pool2d(h, 3, stride=2)
        h = h.view(-1, 256*6*6)
        # layer6-8
        h = F.dropout(F.relu(self.fc6(h)), training=self.training)
        h = F.dropout(F.relu(self.fc7(h)), training=self.training)
        h = self.fc8(h)
        return h.view(-1, self.Nj, 2)
Project: pytorch-retinanet    Author: kuangliu    | project source | file source
def forward(self, x):
        # Bottom-up
        c1 = F.relu(self.bn1(self.conv1(x)))
        c1 = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)
        c2 = self.layer1(c1)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)
        p6 = self.conv6(c5)
        p7 = self.conv7(F.relu(p6))
        # Top-down
        p5 = self.latlayer1(c5)
        p4 = self._upsample_add(p5, self.latlayer2(c4))
        p4 = self.toplayer1(p4)
        p3 = self._upsample_add(p4, self.latlayer3(c3))
        p3 = self.toplayer2(p3)
        return p3, p4, p5, p6, p7
Project: samples    Author: delta-onera    | project source | file source
def forward(self, x):
        x = F.relu(self.conv1_1(x))
        x = F.relu(self.conv1_2(x))
        x = F.max_pool2d(x,kernel_size=2, stride=2,return_indices=False)

        x = F.relu(self.conv2_1(x))
        x = F.relu(self.conv2_2(x))
        x = F.max_pool2d(x,kernel_size=2, stride=2,return_indices=False)

        x = F.relu(self.conv3_1(x))
        x = F.relu(self.conv3_2(x))
        x = F.relu(self.conv3_3(x))
        x = F.max_pool2d(x,kernel_size=2, stride=2,return_indices=False)

        x = F.relu(self.conv4_1(x))
        x = F.relu(self.conv4_2(x))
        x = F.relu(self.conv4_3(x))
        x = F.max_pool2d(x,kernel_size=2, stride=2,return_indices=False)

        x = F.relu(self.conv5_1(x))
        x = F.relu(self.conv5_2(x))
        x = F.relu(self.conv5_3(x))

        x = self.prob(x)
        return x
Project: PytorchDL    Author: FredHuangBia    | project source | file source
def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.bn1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = F.max_pool2d(x, kernel_size=(1,2), stride=(1,2), padding=(0,1))
        x = F.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = F.max_pool2d(x, kernel_size=(1,2), stride=(1,2), padding=(0,1))
        x = F.relu(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = F.avg_pool2d(x, kernel_size=(1,2), stride=(1,2), padding=0)
        x = F.relu(x)
        x = x.view(-1,192)

        return x
Project: pytorch-planet-amazon    Author: rwightman    | project source | file source
def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
    """Selectable global pooling function with dynamic input kernel size
    """
    if pool_type == 'avgmaxc':
        x = torch.cat([
            F.avg_pool2d(
                x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
            F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
        ], dim=1)
    elif pool_type == 'avgmax':
        x_avg = F.avg_pool2d(
                x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
        x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
        x = 0.5 * (x_avg + x_max)
    elif pool_type == 'max':
        x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
    else:
        if pool_type != 'avg':
            print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
        x = F.avg_pool2d(
            x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
    return x
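
A short usage sketch for the adaptive_avgmax_pool2d helper above (it assumes import torch and the function as defined; the feature-map shape is hypothetical):

feats = torch.randn(8, 512, 7, 7)                     # NCHW feature maps
out_avg = adaptive_avgmax_pool2d(feats)               # global average pooling   -> (8, 512, 1, 1)
out_mix = adaptive_avgmax_pool2d(feats, 'avgmax')     # 0.5 * (avg + max)        -> (8, 512, 1, 1)
out_cat = adaptive_avgmax_pool2d(feats, 'avgmaxc')    # avg and max concatenated -> (8, 1024, 1, 1)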
Project: pytorch-dpn-pretrained    Author: rwightman    | project source | file source
def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
    """Selectable global pooling function with dynamic input kernel size
    """
    if pool_type == 'avgmaxc':
        x = torch.cat([
            F.avg_pool2d(
                x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
            F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
        ], dim=1)
    elif pool_type == 'avgmax':
        x_avg = F.avg_pool2d(
                x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
        x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
        x = 0.5 * (x_avg + x_max)
    elif pool_type == 'max':
        x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
    else:
        if pool_type != 'avg':
            print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
        x = F.avg_pool2d(
            x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
    return x
Project: pytorch-faster-rcnn    Author: ruotianluo    | project source | file source
def _crop_pool_layer(self, bottom, rois, max_pool=True):
    # implement it using stn
    # box to affine
    # input (x1,y1,x2,y2)
    """
    [  x2-x1             x1 + x2 - W + 1  ]
    [  -----      0      ---------------  ]
    [  W - 1                  W - 1       ]
    [                                     ]
    [           y2-y1    y1 + y2 - H + 1  ]
    [    0      -----    ---------------  ]
    [           H - 1         H - 1      ]
    """
    rois = rois.detach()

    x1 = rois[:, 1::4] / 16.0
    y1 = rois[:, 2::4] / 16.0
    x2 = rois[:, 3::4] / 16.0
    y2 = rois[:, 4::4] / 16.0

    height = bottom.size(2)
    width = bottom.size(3)

    # affine theta
    theta = Variable(rois.data.new(rois.size(0), 2, 3).zero_())
    theta[:, 0, 0] = (x2 - x1) / (width - 1)
    theta[:, 0 ,2] = (x1 + x2 - width + 1) / (width - 1)
    theta[:, 1, 1] = (y2 - y1) / (height - 1)
    theta[:, 1, 2] = (y1 + y2 - height + 1) / (height - 1)

    if max_pool:
      pre_pool_size = cfg.POOLING_SIZE * 2
      grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, pre_pool_size, pre_pool_size)))
      crops = F.grid_sample(bottom.expand(rois.size(0), bottom.size(1), bottom.size(2), bottom.size(3)), grid)
      crops = F.max_pool2d(crops, 2, 2)
    else:
      grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, cfg.POOLING_SIZE, cfg.POOLING_SIZE)))
      crops = F.grid_sample(bottom.expand(rois.size(0), bottom.size(1), bottom.size(2), bottom.size(3)), grid)

    return crops
Project: future-price-predictor    Author: htfy96    | project source | file source
def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.softmax(x)
        return x
Project: torch_light    Author: ne7ermore    | project source | file source
def forward(self, input):
        bsz, word_len, char_len = input.size()
        encode = input.view(-1, char_len)
        encode = self.char_ebd(encode).unsqueeze(1)
        encode = F.relu(self.char_cnn(encode))
        encode = F.max_pool2d(encode,
                    kernel_size=(encode.size(2), 1))
        encode = F.dropout(encode.squeeze(), p=self.dropout)
        return encode.view(bsz, word_len, -1)
Project: neural-style    Author: ctliu3    | project source | file source
def forward(self, x, typ):
        if typ == ForwardType.Content:
            is_style, is_content = False, True
        elif typ == ForwardType.Style:
            is_style, is_content = True, False
        elif typ == ForwardType.Train:
            is_style, is_content = True, True
        else:
            raise Exception('Unknown forward type, {}'.format(typ))

        internals = {}

        x = F.relu(self.conv1_1(x))
        x = F.relu(self.conv1_2(x))
        if is_style:
            internals['conv1_2'] = x
        x = F.max_pool2d(x, 2, stride=2)

        x = F.relu(self.conv2_1(x))
        x = F.relu(self.conv2_2(x))
        if is_style or is_content:
            internals['conv2_2'] = x
        x = F.max_pool2d(x, 2, stride=2)

        x = F.relu(self.conv3_1(x))
        x = F.relu(self.conv3_2(x))
        x = F.relu(self.conv3_3(x))
        if is_style:
            internals['conv3_3'] = x
        x = F.max_pool2d(x, 2, stride=2)

        x = F.relu(self.conv4_1(x))
        x = F.relu(self.conv4_2(x))
        x = F.relu(self.conv4_3(x))
        if is_style:
            internals['conv4_3'] = x

        return internals
Project: pytorch-caffe-darknet-convert    Author: marvis    | project source | file source
def forward(self, x):
        x = F.max_pool2d(F.pad(x, (0,1,0,1), mode='replicate'), 2, stride=1)
        return x
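
The one-liner above is the stride-1 max-pooling trick used in Darknet-to-PyTorch conversions: replicate-padding the right and bottom edges by one pixel lets a 2x2 kernel at stride 1 keep the output the same size as the input. A quick check with an arbitrary shape (not from the project):

import torch
import torch.nn.functional as F

x = torch.randn(1, 16, 13, 13)
y = F.max_pool2d(F.pad(x, (0, 1, 0, 1), mode='replicate'), 2, stride=1)
print(y.shape)                                        # torch.Size([1, 16, 13, 13])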
Project: pytorch-tutorials    Author: tfygg    | project source | file source
def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        out = self.fc3(x)
        return out
Project: Bilinear_CNN_dog_classifi    Author: chencodeX    | project source | file source
def forward(self, x):
        branch3x3 = self.branch3x3(x)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        outputs = [branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
Project: Bilinear_CNN_dog_classifi    Author: chencodeX    | project source | file source
def forward(self, x):
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)

        branch7x7x3 = self.branch7x7x3_1(x)
        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
        outputs = [branch3x3, branch7x7x3, branch_pool]
        return torch.cat(outputs, 1)
Project: wide-resnet.pytorch    Author: meliketoy    | project source | file source
def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, 2)
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)

        return(out)
Project: pytorch-cifar    Author: kuangliu    | project source | file source
def forward(self, x):
        y1 = self.sep_conv1(x)
        y2 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride==2:
            y2 = self.bn1(self.conv1(y2))
        return F.relu(y1+y2)
Project: pytorch-cifar    Author: kuangliu    | project source | file source
def forward(self, x):
        # Left branch
        y1 = self.sep_conv1(x)
        y2 = self.sep_conv2(x)
        # Right branch
        y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride==2:
            y3 = self.bn1(self.conv1(y3))
        y4 = self.sep_conv3(x)
        # Concat & reduce channels
        b1 = F.relu(y1+y2)
        b2 = F.relu(y3+y4)
        y = torch.cat([b1,b2], 1)
        return F.relu(self.bn2(self.conv2(y)))
Project: pytorch-cifar    Author: kuangliu    | project source | file source
def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, 2)
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        return out
Project: tensorboard-pytorch    Author: lanpa    | project source | file source
def forward(self, x):
        x = F.max_pool2d(self.conv1(x), 2)
        x = F.relu(x)+F.relu(-x)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = self.bn(x)
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        x = F.log_softmax(x)
        return x
Project: tensorboard-pytorch    Author: lanpa    | project source | file source
def forward(self, i):
        i = self.cn1(i)
        i = F.relu(i)
        i = F.max_pool2d(i, 2)
        i =self.cn2(i)
        i = F.relu(i)
        i = F.max_pool2d(i, 2)
        i = i.view(len(i), -1)
        i = self.fc1(i)
        i = F.log_softmax(i)
        return i

#get some random data around value
Project: tensorboard-pytorch    Author: lanpa    | project source | file source
def forward(self, x):
        x = F.max_pool2d(self.conv1(x), 2)
        x = F.relu(x)+F.relu(-x)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = self.bn(x)
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        x = F.log_softmax(x)
        return x
Project: tensorboard-pytorch    Author: lanpa    | project source | file source
def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)

# not working
Project: kaggle-carvana    Author: ematvey    | project source | file source
def forward(self, inputs):
        d0 = self.down0(inputs)

        d1 = self.down1(d0)
        d2 = self.down2(F.max_pool2d(d1, kernel_size=2, stride=2))
        d3 = self.down3(F.max_pool2d(d2, kernel_size=2, stride=2))
        d4 = self.down4(F.max_pool2d(d3, kernel_size=2, stride=2))
        d5 = self.down5(F.max_pool2d(d4, kernel_size=2, stride=2))
        d6 = self.down6(F.max_pool2d(d5, kernel_size=2, stride=2))

        out = self.center(F.max_pool2d(d6, kernel_size=2, stride=2))

        out = self.up6(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d6], dim=1))
        out = self.up5(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d5], dim=1))
        out = self.up4(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d4], dim=1))
        out = self.up3(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d3], dim=1))
        out = self.up2(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d2], dim=1))
        out = self.up1(
            torch.cat([F.upsample(out, scale_factor=2, mode='bilinear'), d1], dim=1))

        out = self.f1(torch.cat([out, d0], dim=1))
        out = self.f2(torch.cat([out, inputs], dim=1))
        out = self.out(out)
        out = out.squeeze(1)  # remove logits dim
        return out
Project: optnet    Author: locuslab    | project source | file source
def forward(self, x):
        nBatch = x.size(0)

        x = F.max_pool2d(self.conv1(x), 2)
        x = F.max_pool2d(self.conv2(x), 2)
        x = x.view(nBatch, -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return self.projF(x)
Project: pytorch_60min_blitz    Author: kyuhyoung    | project source | file source
def __init__(self):
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 5x5 square convolution
        # kernel
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        #self.relu1 = F.relu(self.conv1)
        #self.pool1 = F.max_pool2d(self.relu1, 2)
Project: pytorch_60min_blitz    Author: kyuhyoung    | project source | file source
def forward_ori(self, x):
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square you can only specify a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
         #self.num_flat_features(x) = 16 * 5 * 5
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
Project: Deep-Learning    Author: tankche1    | project source | file source
def Down(self,x, M, N):
        x = F.max_pool2d(x, (2, 2))
        #print(x.data)
        x = self.Block(x, M, N)
        return x
Project: PyTorch-Encoding    Author: zhanghang1989    | project source | file source
def forward(self, input):
        if isinstance(input, Variable):
            return F.max_pool2d(input, self.kernel_size, self.stride, \
                self.padding, self.dilation, self.ceil_mode, \
                self.return_indices)
        elif isinstance(input, tuple) or isinstance(input, list):
            return my_data_parallel(self, input)
        else:
            raise RuntimeError('unknown input type')
Project: MNIST_center_loss_pytorch    Author: jxgu1016    | project source | file source
def forward(self, x):
        x = self.prelu1_1(self.conv1_1(x))
        x = self.prelu1_2(self.conv1_2(x))
        x = F.max_pool2d(x,2)
        x = self.prelu2_1(self.conv2_1(x))
        x = self.prelu2_2(self.conv2_2(x))
        x = F.max_pool2d(x,2)
        x = self.prelu3_1(self.conv3_1(x))
        x = self.prelu3_2(self.conv3_2(x))
        x = F.max_pool2d(x,2)
        x = x.view(-1, 128*3*3)
        ip1 = self.preluip1(self.ip1(x))
        ip2 = self.ip2(ip1)
        return ip1, F.log_softmax(ip2)
Project: samples    Author: delta-onera    | project source | file source
def forward(self, x):
        x = F.relu(self.conv1_1(x))
        x = F.relu(self.conv1_2(x))
        p1 = self.prob1(x)
        x = F.max_pool2d(x,kernel_size=2, stride=2,return_indices=False)

        x = F.relu(self.conv2_1(x))
        x = F.relu(self.conv2_2(x))
        p2 = self.prob2(x)
        p2=F.upsample_nearest(p2, scale_factor=2)
        x = F.max_pool2d(x,kernel_size=2, stride=2,return_indices=False)

        x = F.relu(self.conv3_1(x))
        x = F.relu(self.conv3_2(x))
        x = F.relu(self.conv3_3(x))
        p4 = self.prob4(x)
        p4=F.upsample_nearest(p4, scale_factor=4)
        x = F.max_pool2d(x,kernel_size=2, stride=2,return_indices=False)

        x = F.relu(self.conv4_1(x))
        x = F.relu(self.conv4_2(x))
        x = F.relu(self.conv4_3(x))
        p8 = self.prob8(x)
        p8=F.upsample_nearest(p8, scale_factor=8)
        x = F.max_pool2d(x,kernel_size=2, stride=2,return_indices=False)

        x = F.relu(self.conv5_1(x))
        x = F.relu(self.conv5_2(x))
        x = F.relu(self.conv5_3(x))
        p16 = self.prob16(x)
        p16=F.upsample_nearest(p16, scale_factor=16)

        return p1/16+p2/8+p4/4+p8/2+p16
Project: samples    Author: delta-onera    | project source | file source
def forward(self, x):
        x = F.max_pool2d(self.conv1(x), 2)
        x = F.max_pool2d(self.conv2(x), 2)
        x = x.view(-1, 1024)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
Project: pytorch-yolo2    Author: marvis    | project source | file source
def forward(self, x):
        x = F.max_pool2d(F.pad(x, (0,1,0,1), mode='replicate'), 2, stride=1)
        return x
Project: faster-rcnn.pytorch    Author: jwyang    | project source | file source
def forward(self, features, rois):
        x =  RoIAlignFunction(self.aligned_height+1, self.aligned_width+1,
                                self.spatial_scale)(features, rois)
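        # max_pool2d is presumably imported directly from torch.nn.functional in this module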
        return max_pool2d(x, kernel_size=2, stride=1)
Project: pytorch-pose    Author: bearpaw    | project source | file source
def _hour_glass_forward(self, n, x):
        up1 = self.hg[n-1][0](x)
        low1 = F.max_pool2d(x, 2, stride=2)
        low1 = self.hg[n-1][1](low1)

        if n > 1:
            low2 = self._hour_glass_forward(n-1, low1)
        else:
            low2 = self.hg[n-1][3](low1)
        low3 = self.hg[n-1][2](low2)
        up2 = self.upsample(low3)
        out = up1 + up2
        return out
Project: nn-transfer    Author: gzuidhof    | project source | file source
def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, 2)
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        return out
Project: nn-transfer    Author: gzuidhof    | project source | file source
def forward(self, x):
        out = F.relu(self.bn(self.conv1(x)))
        out = F.max_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = self.fc1(out)
        return out
Project: pretrained-models.pytorch    Author: Cadene    | project source | file source
def define_model(params):
    def conv2d(input, params, base, stride=1, pad=0):
        return F.conv2d(input, params[base + '.weight'],
                        params[base + '.bias'], stride, pad)

    def group(input, params, base, stride, n):
        o = input
        for i in range(0,n):
            b_base = ('%s.block%d.conv') % (base, i)
            x = o
            o = conv2d(x, params, b_base + '0')
            o = F.relu(o)
            o = conv2d(o, params, b_base + '1', stride=i==0 and stride or 1, pad=1)
            o = F.relu(o)
            o = conv2d(o, params, b_base + '2')
            if i == 0:
                o += conv2d(x, params, b_base + '_dim', stride=stride)
            else:
                o += x
            o = F.relu(o)
        return o

    # determine network size by parameters
    blocks = [sum([re.match('group%d.block\d+.conv0.weight'%j, k) is not None
                   for k in params.keys()]) for j in range(4)]

    def f(input, params, pooling_classif=True):
        o = F.conv2d(input, params['conv0.weight'], params['conv0.bias'], 2, 3)
        o = F.relu(o)
        o = F.max_pool2d(o, 3, 2, 1)
        o_g0 = group(o, params, 'group0', 1, blocks[0])
        o_g1 = group(o_g0, params, 'group1', 2, blocks[1])
        o_g2 = group(o_g1, params, 'group2', 2, blocks[2])
        o_g3 = group(o_g2, params, 'group3', 2, blocks[3])
        if pooling_classif:
            o = F.avg_pool2d(o_g3, 7, 1, 0)
            o = o.view(o.size(0), -1)
            o = F.linear(o, params['fc.weight'], params['fc.bias'])
        return o

    return f
Project: intel-cervical-cancer    Author: wangg12    | project source | file source
def forward(self, x):
        branch3x3 = self.branch3x3(x)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        outputs = [branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
Project: intel-cervical-cancer    Author: wangg12    | project source | file source
def forward(self, x):
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)

        branch7x7x3 = self.branch7x7x3_1(x)
        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
        outputs = [branch3x3, branch7x7x3, branch_pool]
        return torch.cat(outputs, 1)
Project: pytorch-playground    Author: aaron-xichen    | project source | file source
def forward(self, x):
        branch3x3 = self.branch3x3(x)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        outputs = [branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
Project: pytorch-playground    Author: aaron-xichen    | project source | file source
def forward(self, x):
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)

        branch7x7x3 = self.branch7x7x3_1(x)
        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
        outputs = [branch3x3, branch7x7x3, branch_pool]
        return torch.cat(outputs, 1)
Project: tutorials    Author: pytorch    | project source | file source
def forward(self, x):
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square you can only specify a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
Project: Athena    Author: bakhyeonjae    | project source | file source
def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, 64*7*7)   # reshape Variable
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)