Python torchvision.models module: resnet50() example source code

The following 10 code examples, extracted from open-source Python projects, illustrate how to use torchvision.models.resnet50().
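
Before the project snippets, here is a minimal usage sketch: load the ImageNet-pretrained weights, switch to inference mode, and run a forward pass (the random 224x224 tensor below only stands in for a preprocessed image and is there to show the expected shapes):

import torch
from torchvision import models

# Load ResNet-50 with ImageNet-pretrained weights (downloaded on first use).
model = models.resnet50(pretrained=True)
model.eval()  # put BatchNorm and Dropout layers into inference mode

# Dummy batch of one 3x224x224 image, standing in for a preprocessed input.
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)  # shape (1, 1000): one score per ImageNet class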

Project: speed    Author: keon    | project source | file source
def __init__(self, n_layers=2, h_size=512):
        super(ResLSTM, self).__init__()
        print('Building ResNet-50 + LSTM model...')
        self.h_size = h_size
        self.n_layers = n_layers

        resnet = models.resnet50(pretrained=True)
        self.conv = nn.Sequential(*list(resnet.children())[:-1])

        self.lstm = nn.LSTM(1280, h_size, dropout=0.2, num_layers=n_layers)
        self.fc = nn.Sequential(
            nn.Linear(h_size, 64),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(64, 1)
        )
Project: PyTorch-Encoding    Author: zhanghang1989    | project source | file source
def __init__(self, args):
        nclass = args.nclass
        super(Net, self).__init__()
        self.backbone = args.backbone
        # copying modules from pretrained models
        if self.backbone == 'resnet50':
            self.pretrained = resnet.resnet50(pretrained=True)
        elif self.backbone == 'resnet101':
            self.pretrained = resnet.resnet101(pretrained=True)
        elif self.backbone == 'resnet152':
            self.pretrained = resnet.resnet152(pretrained=True)
        else:
            raise RuntimeError('unknown backbone: {}'.format(self.backbone))
        n_codes = 32
        self.head = nn.Sequential(
            nn.Conv2d(2048, 128, 1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            encoding.nn.Encoding(D=128, K=n_codes),
            encoding.nn.View(-1, 128*n_codes),
            encoding.nn.Normalize(),
            nn.Linear(128*n_codes, nclass),
        )
Project: voc_classification    Author: HyeonwooNoh    | project source | file source
def GetPretrainedModel(params, num_classes):
    if params['model'] == 'resnet18':
        model = models.resnet18(pretrained=True)
    elif params['model'] == 'resnet34':
        model = models.resnet34(pretrained=True)
    elif params['model'] == 'resnet50':
        model = models.resnet50(pretrained=True)
    elif params['model'] == 'resnet101':
        model = models.resnet101(pretrained=True)
    elif params['model'] == 'resnet152':
        model = models.resnet152(pretrained=True)
    else:
        raise ValueError('Unknown model type')
    num_features = model.fc.in_features
    model.fc = SigmoidLinear(num_features, num_classes)
    return model
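
A hypothetical call to the helper above (SigmoidLinear is a layer defined elsewhere in that project; the dict key 'model' is the one the function reads):

# Hypothetical usage: ResNet-50 backbone with a 20-class multi-label head.
params = {'model': 'resnet50'}
model = GetPretrainedModel(params, num_classes=20)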
Project: wildcat.pytorch    Author: durandtibo    | project source | file source
def resnet50_wildcat(num_classes, pretrained=True, kmax=1, kmin=None, alpha=1, num_maps=1):
    model = models.resnet50(pretrained)
    pooling = nn.Sequential()
    pooling.add_module('class_wise', ClassWisePool(num_maps))
    pooling.add_module('spatial', WildcatPool2d(kmax, kmin, alpha))
    return ResNetWSL(model, num_classes * num_maps, pooling=pooling)
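
A hypothetical call to the factory above, assuming ClassWisePool, WildcatPool2d and ResNetWSL are importable from the wildcat.pytorch project:

# Hypothetical usage: 20 classes, 4 class-wise maps, wildcat pooling parameters.
model = resnet50_wildcat(num_classes=20, kmax=1, kmin=None, alpha=0.7, num_maps=4)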
Project: pytorch-seq2seq    Author: rowanz    | project source | file source
def __init__(self, input_size, hidden_size, use_embedding=False, use_cnn=False, vocab_size=None,
                 pad_idx=None):
        """
        Bidirectional GRU for encoding sequences
        :param input_size: Size of the feature dimension (or, if use_embedding=True, the embed dim)
        :param hidden_size: Size of the GRU hidden layer. Outputs will be hidden_size*2
        :param use_embedding: True if we need to embed the sequences
        :param vocab_size: Size of vocab (only used if use_embedding=True)
        """

        super(EncoderRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.gru = nn.GRU(input_size, hidden_size, bidirectional=True)

        self.use_embedding = use_embedding
        self.use_cnn = use_cnn
        self.vocab_size = vocab_size
        self.embed = None
        if self.use_embedding:
            assert self.vocab_size is not None
            self.pad = pad_idx
            self.embed = nn.Embedding(self.vocab_size, self.input_size, padding_idx=pad_idx)
        elif self.use_cnn:
            self.embed = models.resnet50(pretrained=True)

            for param in self.embed.parameters():
                param.requires_grad = False
            self.embed.fc = nn.Linear(self.embed.fc.in_features, self.input_size)

            # Init weights (should be moved.)
            self.embed.fc.weight.data.normal_(0.0, 0.02)
            self.embed.fc.bias.data.fill_(0)
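
Because the loop above freezes every original ResNet-50 parameter and the newly created fc layer is trainable again, a common companion step is to hand the optimizer only the parameters that still require gradients. A minimal sketch, assuming an encoder instance named enc:

import torch

# The frozen ResNet-50 weights are filtered out; only the GRU, the new fc layer
# and any embedding remain in the optimizer's parameter list.
trainable = [p for p in enc.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(trainable, lr=1e-3)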
Project: PyTorch-Encoding    Author: zhanghang1989    | project source | file source
def __init__(self, nclass):
        super(Dilated_ResNet, self).__init__()
        self.pretrained = dresnet.resnet50(pretrained=True)
Project: PyTorch-Encoding    Author: zhanghang1989    | project source | file source
def __init__(self, nclass):
        super(Org_ResNet, self).__init__()
        self.pretrained = orgresnet.resnet50(pretrained=True)
Project: weldon.resnet.pytorch    Author: durandtibo    | project source | file source
def resnet50_weldon(num_classes, pretrained=True, kmax=1, kmin=None):
    model = models.resnet50(pretrained)
    pooling = WeldonPool2d(kmax, kmin)
    return ResNetWSL(model, num_classes, pooling=pooling)
Project: PytorchDL    Author: FredHuangBia    | project source | file source
def __init__(self, opt):
        super().__init__()
        self.opt = opt

        if opt.netSpec == 'resnet101':
            resnet = models.resnet101(pretrained=opt.pretrain)
        elif opt.netSpec == 'resnet50':
            resnet = models.resnet50(pretrained=opt.pretrain)
        elif opt.netSpec == 'resnet34':
            resnet = models.resnet34(pretrained=opt.pretrain)
        else:
            raise ValueError('unknown netSpec: {}'.format(opt.netSpec))

        self.conv1 = resnet.conv1
        self.layer1 = resnet.layer1
        self.layer2 = resnet.layer2
        self.layer3 = resnet.layer3
        self.layer4 = resnet.layer4

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # m.stride = 1
                # Freeze the convolution weights: requires_grad must be set on the
                # parameters themselves, not on the module, to take effect.
                for param in m.parameters():
                    param.requires_grad = False
            if isinstance(m, nn.BatchNorm2d):
                for param in m.parameters():
                    param.requires_grad = False

        self.layer5a = PSPDec(512, 128, (1,1))
        self.layer5b = PSPDec(512, 128, (2,2))
        self.layer5c = PSPDec(512, 128, (3,3))
        self.layer5d = PSPDec(512, 128, (6,6))

        self.final = nn.Sequential(
            nn.Conv2d(512*2, 512, 3, padding=1, bias=False),
            nn.BatchNorm2d(512, momentum=.95),
            nn.ReLU(inplace=True),
            nn.Dropout(.1),
            nn.Conv2d(512, opt.numClasses, 1),
        )
Project: pretrained-models.pytorch    Author: Cadene    | project source | file source
def resnet50(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-50 model.
    """
    model = models.resnet50(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['resnet50'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_resnets(model)
    return model
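
A hypothetical call to the wrapper above, assuming pretrained_settings, load_pretrained and modify_resnets come from the pretrained-models.pytorch package as in the original file:

# Hypothetical usage: ImageNet weights via the package's own loading helpers.
model = resnet50(num_classes=1000, pretrained='imagenet')
model.eval()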