Python torch.nn module: Linear() example source code

The code examples below, extracted from open-source Python projects, illustrate how to use torch.nn.Linear().
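
Before the project snippets, here is a minimal, self-contained sketch of what nn.Linear itself does (the shapes and names are illustrative, not taken from any of the projects below):

import torch
import torch.nn as nn

# nn.Linear(in_features, out_features) computes y = x @ W.T + b,
# acting on the last dimension of its input
fc = nn.Linear(20, 5)       # weight: (5, 20), bias: (5,)
x = torch.randn(3, 20)      # a batch of 3 vectors
y = fc(x)                   # shape: (3, 5)
print(y.shape)              # torch.Size([3, 5])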

Project: python-utils | Author: zhijian-liu
def __init__(self, input_size, feature_size = 128, hidden_size = 256, num_layers = 1, dropout = 0.9):
        super(SeqEncoder, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # set up modules for recurrent neural networks
        self.rnn = nn.LSTM(input_size = input_size,
                           hidden_size = hidden_size,
                           num_layers = num_layers,
                           batch_first = True,
                           dropout = dropout,
                           bidirectional = True)
        self.rnn.apply(weights_init)

        # set up modules to compute features
        self.feature = nn.Linear(hidden_size * 2, feature_size)
        self.feature.apply(weights_init)
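
Because the LSTM above is bidirectional, its output feature dimension is hidden_size * 2, which is exactly what the nn.Linear layer consumes. A hypothetical forward pass (an assumption for illustration; the project's own forward method is not shown here):

def forward(self, x):
        # x: (batch, seq_len, input_size)
        output, _ = self.rnn(x)        # (batch, seq_len, hidden_size * 2)
        return self.feature(output)    # (batch, seq_len, feature_size)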
Project: python-utils | Author: zhijian-liu
def __init__(self, input_size, num_classes, hidden_size = 256, num_layers = 1, dropout = 0.9):
        super(SeqLabeler, self).__init__()
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # set up modules for recurrent neural networks
        self.rnn = nn.LSTM(input_size = input_size,
                           hidden_size = hidden_size,
                           num_layers = num_layers,
                           batch_first = True,
                           dropout = dropout,
                           bidirectional = True)
        self.rnn.apply(weights_init)

        # set up modules to compute classification
        self.classifier = nn.Linear(hidden_size * 2, num_classes)
        self.classifier.apply(weights_init)
Project: YellowFin_Pytorch | Author: JianGoForIt
def __init__(self, batch_size, word_gru_hidden, feature_dim, n_classes, bidirectional=True):        

        super(MixtureSoftmax, self).__init__()

        # feature-only variant: zero out the GRU contribution so that only
        # the hand-crafted features feed the linear layer
        word_gru_hidden = 0

        self.batch_size = batch_size
        self.n_classes = n_classes
        self.word_gru_hidden = word_gru_hidden
        self.feature_dim = feature_dim

        if bidirectional:
            self.linear = nn.Linear(2 * 2 * word_gru_hidden + feature_dim, n_classes)
        else:
            self.linear = nn.Linear(2 * word_gru_hidden + feature_dim, n_classes)
Project: convNet.pytorch | Author: eladhoffer
def __init__(self, num_classes=1000,
                 block=Bottleneck, layers=[3, 4, 23, 3]):
        super(ResNet_imagenet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        init_model(self)
        self.regime = [
            {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1,
             'weight_decay': 1e-4, 'momentum': 0.9},
            {'epoch': 30, 'lr': 1e-2},
            {'epoch': 60, 'lr': 1e-3, 'weight_decay': 0},
            {'epoch': 90, 'lr': 1e-4}
        ]
Project: convNet.pytorch | Author: eladhoffer
def __init__(self, num_classes=10,
                 block=BasicBlock, depth=18):
        super(ResNet_cifar10, self).__init__()
        self.inplanes = 16
        n = int((depth - 2) / 6)
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = lambda x: x  # identity: CIFAR inputs are too small to pool here
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.layer4 = lambda x: x  # identity: the CIFAR variant uses only three stages
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64, num_classes)

        init_model(self)
        self.regime = [
            {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1,
             'weight_decay': 1e-4, 'momentum': 0.9},
            {'epoch': 81, 'lr': 1e-2},
            {'epoch': 122, 'lr': 1e-3, 'weight_decay': 0},
            {'epoch': 164, 'lr': 1e-4}
        ]
Project: PaintsPytorch | Author: orashi
def R_weight_init(ms):
    for m in ms.modules():
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)
        elif classname.find('Linear') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data)


Project: ResNeXt-DenseNet | Author: D-X-Y
def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Project: speed | Author: keon
def __init__(self, n_layers=2, h_size=512):
        super(ResLSTM, self).__init__()
        print('Building ResNet + LSTM model...')
        self.h_size = h_size
        self.n_layers = n_layers

        resnet = models.resnet50(pretrained=True)
        self.conv = nn.Sequential(*list(resnet.children())[:-1])

        self.lstm = nn.LSTM(1280, h_size, dropout=0.2, num_layers=n_layers)
        self.fc = nn.Sequential(
            nn.Linear(h_size, 64),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(64, 1)
        )
Project: speed | Author: keon
def __init__(self, h_size=512, n_layers=3):
        super(DenseLSTM, self).__init__()
        print('Building DenseNet + LSTM model...')
        self.h_size = h_size
        self.n_layers = n_layers

        densenet = models.densenet201(pretrained=True)
        self.conv = nn.Sequential(*list(densenet.children())[:-1])

        self.lstm = nn.LSTM(23040, h_size, dropout=0.2, num_layers=n_layers)
        self.fc = nn.Sequential(
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(256, 1)
        )
Project: speed | Author: keon
def __init__(self, n_layers=2, h_size=420):
        super(AlexLSTM, self).__init__()
        print('Building AlexNet + LSTM model...')
        self.h_size = h_size
        self.n_layers = n_layers

        alexnet = models.alexnet(pretrained=True)
        self.conv = nn.Sequential(*list(alexnet.children())[:-1])

        self.lstm = nn.LSTM(1280, h_size, dropout=0.2, num_layers=n_layers)
        self.fc = nn.Sequential(
            nn.Linear(h_size, 64),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(64, 1)
        )
Project: colorNet-pytorch | Author: shufanwu
def __init__(self):
        super(GlobalFeatNet, self).__init__()
        self.conv1 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
        self.bn1 = nn.BatchNorm2d(512)
        self.conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(512)
        self.conv3 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
        self.bn3 = nn.BatchNorm2d(512)
        self.conv4 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.bn4 = nn.BatchNorm2d(512)
        self.fc1 = nn.Linear(25088, 1024)
        self.bn5 = nn.BatchNorm1d(1024)
        self.fc2 = nn.Linear(1024, 512)
        self.bn6 = nn.BatchNorm1d(512)
        self.fc3 = nn.Linear(512, 256)
        self.bn7 = nn.BatchNorm1d(256)
Project: pytorch-dist | Author: apaszke
def test_parameters(self):
        def num_params(module):
            return len(list(module.parameters()))
        class Net(nn.Container):
            def __init__(self):
                super(Net, self).__init__(
                    l1=l,
                    l2=l
                )
                self.param = Parameter(torch.Tensor(3, 5))
        l = nn.Linear(10, 20)
        n = Net()
        s = nn.Sequential(n, n, n, n)
        self.assertEqual(num_params(l), 2)
        self.assertEqual(num_params(n), 3)
        self.assertEqual(num_params(s), 3)
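
The assertions above follow from parameter sharing: l1 and l2 refer to the same Linear module, so its weight and bias are registered once, and Net adds one extra Parameter (2 + 1 = 3). The same de-duplication still holds in current PyTorch, where nn.Container has since been folded into nn.Module; a minimal sketch under that assumption:

import torch.nn as nn

shared = nn.Linear(10, 20)
net = nn.Sequential(shared, shared)   # one module registered twice
# parameters() de-duplicates shared modules: only weight + bias remain
print(len(list(net.parameters())))    # 2, not 4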
Project: pytorch-dist | Author: apaszke
def test_parallel_apply(self):
        l1 = nn.Linear(10, 5).float().cuda(0)
        l2 = nn.Linear(10, 5).float().cuda(1)
        i1 = Variable(torch.randn(2, 10).float().cuda(0))
        i2 = Variable(torch.randn(2, 10).float().cuda(1))
        expected1 = l1(i1).data
        expected2 = l2(i2).data
        inputs = (i1, i2)
        modules = (l1, l2)
        expected_outputs = (expected1, expected2)
        outputs = dp.parallel_apply(modules, inputs)
        for out, expected in zip(outputs, expected_outputs):
            self.assertEqual(out.data, expected)

        inputs = (i1, Variable(i2.data.new()))
        expected_outputs = (expected1, expected2.new())
Project: pytorch-dist | Author: apaszke
def test_load_parameter_dict(self):
        l = nn.Linear(5, 5)
        block = nn.Container(
            conv=nn.Conv2d(3, 3, 3, bias=False)
        )
        net = nn.Container(
            linear1=l,
            linear2=l,
            block=block,
            empty=None,
        )
        param_dict = {
            'linear1.weight': Variable(torch.ones(5, 5)),
            'block.conv.bias': Variable(torch.range(1, 3)),
        }
        net.load_parameter_dict(param_dict)
        self.assertIs(net.linear1.weight, param_dict['linear1.weight'])
        self.assertIs(net.block.conv.bias, param_dict['block.conv.bias'])
Project: kaggle-planet | Author: ZijunDeng
def _weights_init(model, pretrained):
    for m in model.modules():
        if pretrained:
            if isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, math.sqrt(2. / n))
                m.bias.data.zero_()
        else:
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, math.sqrt(2. / n))
                m.bias.data.zero_()
Project: allennlp | Author: allenai
def __init__(self,
                 num_heads: int,
                 input_dim: int,
                 attention_dim: int,
                 values_dim: int,
                 output_projection_dim: int = None,
                 attention_dropout_prob: float = 0.1) -> None:
        super(MultiHeadSelfAttention, self).__init__()

        self._num_heads = num_heads
        self._input_dim = input_dim
        self._output_dim = output_projection_dim or input_dim
        self._attention_dim = attention_dim
        self._values_dim = values_dim

        self._query_projections = Parameter(torch.FloatTensor(num_heads, input_dim, attention_dim))
        self._key_projections = Parameter(torch.FloatTensor(num_heads, input_dim, attention_dim))
        self._value_projections = Parameter(torch.FloatTensor(num_heads, input_dim, values_dim))

        self._scale = input_dim ** 0.5
        self._output_projection = Linear(num_heads * values_dim,
                                         self._output_dim)
        self._attention_dropout = Dropout(attention_dropout_prob)

        self.reset_parameters()
Project: torch_light | Author: ne7ermore
def __init__(self, args):
        super().__init__()

        for k, v in args.__dict__.items():
            self.__setattr__(k, v)

        self.lookup_table = nn.Embedding(self.vocab_size, self.embed_dim)
        self.bi_lstm = nn.LSTM(self.embed_dim,
                               self.lstm_hsz,
                               num_layers=self.lstm_layers,
                               batch_first=True,
                               dropout=self.dropout,
                               bidirectional=True)

        self.logistic = nn.Linear(2*self.lstm_hsz, self.tag_size)
        self._init_weights(scope=self.w_init)
Project: KagglePlanetPytorch | Author: Mctigger
def generate_model():
    class MyModel(nn.Module):
        def __init__(self, pretrained_model):
            super(MyModel, self).__init__()
            self.pretrained_model = pretrained_model
            self.layer1 = pretrained_model.layer1
            self.layer2 = pretrained_model.layer2
            self.layer3 = pretrained_model.layer3
            self.layer4 = pretrained_model.layer4

            pretrained_model.avgpool = nn.AvgPool2d(8)
            classifier = [
                nn.Linear(pretrained_model.fc.in_features, 17),
            ]

            self.classifier = nn.Sequential(*classifier)
            pretrained_model.fc = self.classifier

        def forward(self, x):
            return F.sigmoid(self.pretrained_model(x))

    return MyModel(torchvision.models.resnet50(pretrained=True))
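
This snippet, and the near-identical ones that follow, replaces a pretrained ResNet's 1000-way ImageNet head with a 17-output nn.Linear (the Planet dataset has 17 labels). Stripped of the wrapper class, the core pattern is (a sketch, not the project's exact code):

import torch.nn as nn
import torchvision

model = torchvision.models.resnet50(pretrained=True)
# read in_features from the existing head so the sizes always match
model.fc = nn.Linear(model.fc.in_features, 17)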
Project: KagglePlanetPytorch | Author: Mctigger
def generate_model():
    class MyModel(nn.Module):
        def __init__(self, pretrained_model):
            super(MyModel, self).__init__()
            self.pretrained_model = pretrained_model
            self.layer1 = pretrained_model.layer1
            self.layer2 = pretrained_model.layer2
            self.layer3 = pretrained_model.layer3
            self.layer4 = pretrained_model.layer4

            pretrained_model.avgpool = nn.AvgPool2d(8)
            classifier = [
                nn.Linear(pretrained_model.fc.in_features, 17),
            ]

            self.classifier = nn.Sequential(*classifier)
            pretrained_model.fc = self.classifier

        def forward(self, x):
            return F.sigmoid(self.pretrained_model(x))

    return MyModel(torchvision.models.resnet34(pretrained=True))
Project: KagglePlanetPytorch | Author: Mctigger
def generate_model():
    class MyModel(nn.Module):
        def __init__(self, pretrained_model):
            super(MyModel, self).__init__()
            self.pretrained_model = pretrained_model
            self.layer1 = pretrained_model.layer1
            self.layer2 = pretrained_model.layer2
            self.layer3 = pretrained_model.layer3
            self.layer4 = pretrained_model.layer4

            pretrained_model.avgpool = nn.AvgPool2d(8)
            classifier = [
                nn.Linear(pretrained_model.fc.in_features, 17),
            ]

            self.classifier = nn.Sequential(*classifier)
            pretrained_model.fc = self.classifier

        def forward(self, x):
            return F.sigmoid(self.pretrained_model(x))

    return MyModel(torchvision.models.resnet101(pretrained=True))
Project: KagglePlanetPytorch | Author: Mctigger
def generate_model():
    class MyModel(nn.Module):
        def __init__(self, pretrained_model):
            super(MyModel, self).__init__()
            self.pretrained_model = pretrained_model
            self.layer1 = pretrained_model.layer1
            self.layer2 = pretrained_model.layer2
            self.layer3 = pretrained_model.layer3
            self.layer4 = pretrained_model.layer4

            pretrained_model.avgpool = nn.AvgPool2d(8)
            classifier = [
                nn.Linear(pretrained_model.fc.in_features, 17),
            ]

            self.classifier = nn.Sequential(*classifier)
            pretrained_model.fc = self.classifier

        def forward(self, x):
            return self.pretrained_model(x)

    return MyModel(torchvision.models.resnet18(pretrained=True))
Project: MIL.pytorch | Author: gujiuxiang
def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)  # changed from the torchvision default (padding=1, ceil_mode=False)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Project: Video-Classification-Action-Recognition | Author: qijiezhao
def __init__(self, out_size, gpu_id, num_seg):
        super(VC_inception_v4, self).__init__()
        sys.path.insert(0, '../tool/models_zoo/')
        from inceptionv4.pytorch_load import inceptionv4
        self.inception_v4 = inceptionv4(pretrained=True).cuda()
        # replace the pretrained classifier with dropout + a 101-way Linear head
        mod = [nn.Dropout(p=0.8)]
        mod.append(nn.Linear(1536, 101))
        new_fc = nn.Sequential(*mod)
        self.inception_v4.classif = new_fc
        self.num_seg = num_seg

        self.avg_pool2d = nn.AvgPool2d(kernel_size=(3, 1))
        # optionally freeze the backbone and fine-tune only selected blocks:
        # for params in self.inception_v4.parameters():
        #     params.requires_grad = False
        # for params in self.inception_v4.features[21].parameters():
        #     params.requires_grad = True
Project: Video-Classification-Action-Recognition | Author: qijiezhao
def __init__(self):
        super(C3D_net, self).__init__()
        self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1))
        self.relu = nn.ReLU()
        self.maxpool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
        self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1))
        self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv3 = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1))
        self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv4 = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1))
        self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.conv5 = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1))
        self.maxpool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        self.num_out_maxpool5 = 2304
        self.fc6 = nn.Linear(self.num_out_maxpool5, 2048)
        self.fc7 = nn.Linear(2048, 2048)
        self.fc8 = nn.Linear(2048, 101)
        self._initialize_weights()
Project: pyro | Author: uber
def __init__(self, z_dim, transition_dim):
        super(GatedTransition, self).__init__()
        # initialize the six linear transformations used in the neural network
        self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim)
        self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim)
        self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim)
        self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim)
        self.lin_sig = nn.Linear(z_dim, z_dim)
        self.lin_z_to_mu = nn.Linear(z_dim, z_dim)
        # modify the default initialization of lin_z_to_mu
        # so that it starts out as the identity function
        self.lin_z_to_mu.weight.data = torch.eye(z_dim)
        self.lin_z_to_mu.bias.data = torch.zeros(z_dim)
        # initialize the three non-linearities used in the neural network
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.softplus = nn.Softplus()
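
Overwriting the weight with torch.eye and zeroing the bias makes lin_z_to_mu start out as the identity map, so early in training the proposed mean is simply the previous latent state. A quick check of that initialization trick (a standalone sketch, not pyro code):

import torch
import torch.nn as nn

lin = nn.Linear(4, 4)
lin.weight.data = torch.eye(4)    # start as the identity map
lin.bias.data = torch.zeros(4)
z = torch.randn(1, 4)
print(torch.allclose(lin(z), z))  # True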
Project: inferno | Author: inferno-pytorch
def _make_test_model():
        import torch.nn as nn
        from inferno.extensions.layers.reshape import AsMatrix

        toy_net = nn.Sequential(nn.Conv2d(3, 128, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(128, 128, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(128, 256, 3, 1, 1),
                                nn.ELU(),
                                nn.AdaptiveAvgPool2d((1, 1)),
                                AsMatrix(),
                                nn.Linear(256, 10),
                                nn.Softmax())
        return toy_net
Project: inferno | Author: inferno-pytorch
def build_graph_model(self):
        model = Graph()
        model\
            .add_input_node('input')\
            .add_node('conv1', Conv2D(3, 32, 3), 'input')\
            .add_node('conv2', BNReLUConv2D(32, 32, 3), 'conv1')\
            .add_node('pool1', nn.MaxPool2d(kernel_size=2, stride=2), 'conv2')\
            .add_node('conv3', BNReLUConv2D(32, 32, 3), 'pool1')\
            .add_node('pool2', nn.MaxPool2d(kernel_size=2, stride=2), 'conv3')\
            .add_node('conv4', BNReLUConv2D(32, 32, 3), 'pool2')\
            .add_node('pool3', nn.AdaptiveAvgPool2d(output_size=(1, 1)), 'conv4')\
            .add_node('matrix', AsMatrix(), 'pool3')\
            .add_node('linear', nn.Linear(32, self.NUM_CLASSES), 'matrix')\
            .add_node('softmax', nn.Softmax(), 'linear')\
            .add_output_node('output', 'softmax')
        return model
Project: jack | Author: uclmr
def __init__(self, shared_resources: SharedResources):
        super(FastQAPyTorchModule, self).__init__()
        self._shared_resources = shared_resources
        input_size = shared_resources.config["repr_dim_input"]
        size = shared_resources.config["repr_dim"]
        self._size = size
        self._with_char_embeddings = self._shared_resources.config.get("with_char_embeddings", False)

        # modules & parameters
        if self._with_char_embeddings:
            self._conv_char_embedding = embedding.ConvCharEmbeddingModule(
                len(shared_resources.char_vocab), size)
            self._embedding_projection = nn.Linear(size + input_size, size)
            self._embedding_highway = Highway(size, 1)
            self._v_wiq_w = nn.Parameter(torch.ones(1, 1, input_size + size))
            input_size = size
        else:
            self._v_wiq_w = nn.Parameter(torch.ones(1, 1, input_size))

        self._bilstm = BiLSTM(input_size + 2, size)
        self._answer_layer = FastQAAnswerModule(shared_resources)

        # [size, 2 * size]
        self._question_projection = nn.Parameter(torch.cat([torch.eye(size), torch.eye(size)], dim=1))
        self._support_projection = nn.Parameter(torch.cat([torch.eye(size), torch.eye(size)], dim=1))
Project: PyTorchDemystified | Author: hhsecond
def __init__(self, config):
        super(SNLIClassifier, self).__init__()
        self.config = config
        self.embed = nn.Embedding(config.n_embed, config.d_embed)
        self.projection = Linear(config.d_embed, config.d_proj)
        self.embed_bn = BatchNorm(config.d_proj)
        self.embed_dropout = nn.Dropout(p=config.embed_dropout)
        self.encoder = SPINN(config) if config.spinn else Encoder(config)
        feat_in_size = config.d_hidden * (
            2 if self.config.birnn and not self.config.spinn else 1)
        self.feature = Feature(feat_in_size, config.mlp_dropout)
        self.mlp_dropout = nn.Dropout(p=config.mlp_dropout)
        self.relu = nn.ReLU()
        mlp_in_size = 4 * feat_in_size
        mlp = [nn.Linear(mlp_in_size, config.d_mlp), self.relu,
               nn.BatchNorm1d(config.d_mlp), self.mlp_dropout]
        for i in range(config.n_mlp_layers - 1):
            mlp.extend([nn.Linear(config.d_mlp, config.d_mlp), self.relu,
                        nn.BatchNorm1d(config.d_mlp), self.mlp_dropout])
        mlp.append(nn.Linear(config.d_mlp, config.d_out))
        self.out = nn.Sequential(*mlp)
Project: chinese_generation | Author: polaroidz
def __init__(self):
        super().__init__()

        self.conv1 = nn.Conv2d(1, 256, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(256)

        self.max1 = nn.MaxPool2d(3)

        self.conv2 = nn.Conv2d(256, 512, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(512)

        self.max2 = nn.MaxPool2d(3)

        self.conv3 = nn.Conv2d(512, 1024, 3, padding=1)
        self.bn3 = nn.BatchNorm2d(1024)

        self.avg1 = nn.AvgPool2d(3)

        self.fc_mu = nn.Linear(1024, latent_space_size)
        self.fc_sig = nn.Linear(1024, latent_space_size)
Project: cnn-graph-classification | Author: giannisnik
def __init__(self, input_size, hidden_size, num_classes, dim, num_kernels, max_document_length):
        super(CNN, self).__init__()
        self.max_document_length = max_document_length
        self.conv = nn.Conv3d(1, input_size, (1, 1, dim), padding=0)
        self.fc1 = nn.Linear(input_size*num_kernels, hidden_size)
        self.fc2 = nn.Linear(hidden_size, num_classes)
        self.init_weights()
Project: drl.pth | Author: seba-1511
def __init__(self, state_size, layer_sizes=[128, 128], dropout=0.0):
        super(DiscreteFeatures, self).__init__()
        if dropout != 0.0:
            raise Exception('Dropout not supported yet.')
        self.affine1 = nn.Linear(state_size, layer_sizes[0])
Project: drl.pth | Author: seba-1511
def __init__(self, state_size, layer_sizes=[128, 128], dropout=0.0):
        super(ContinuousFeatures, self).__init__()
        if dropout != 0.0:
            raise Exception('Dropout not supported yet.')
        self.affine1 = nn.Linear(state_size, layer_sizes[0])
Project: drl.pth | Author: seba-1511
def __init__(self, feature_extractor=None, action_size=None, features_size=None, recurrent=False):
        super(DiscreteActor, self).__init__()
        self.feature_extractor = feature_extractor
        self.linear = nn.Linear(features_size, action_size)
        self.recurrent = recurrent
Project: drl.pth | Author: seba-1511
def __init__(self, feature_extractor=None, action_size=None, features_size=None, recurrent=False):
        super(ContinuousActor, self).__init__()
        self.feature_extractor = feature_extractor
        self.linear = nn.Linear(features_size, action_size, bias=False)
        self.recurrent = recurrent
        self.linear.weight.data *= 0.1
Project: drl.pth | Author: seba-1511
def __init__(self, feature_extractor, state_size, recurrent=False):
        super(Critic, self).__init__()
        self.feature_extractor = feature_extractor
        self.linear = nn.Linear(state_size, 1)
        self.recurrent = recurrent
Project: drl.pth | Author: seba-1511
def __init__(self, state_size):
        super(BaselineCritic, self).__init__()
        self.fc1 = nn.Linear(state_size, 64, bias=True)
        self.fc2 = nn.Linear(64, 64, bias=True)
        self.value = nn.Linear(64, 1, bias=True)

        # Init
        for p in [self.fc1, self.fc2, self.value]:
            p.weight.data.normal_(0, 1)
            p.weight.data *= 1.0 / th.sqrt(p.weight.data.pow(2).sum(1, keepdim=True))
            p.bias.data.mul_(0.0)
Project: Structured-Self-Attentive-Sentence-Embedding | Author: ExplorerFreda
def __init__(self, config):
        super(SelfAttentiveEncoder, self).__init__()
        self.bilstm = BiLSTM(config)
        self.drop = nn.Dropout(config['dropout'])
        self.ws1 = nn.Linear(config['nhid'] * 2, config['attention-unit'], bias=False)
        self.ws2 = nn.Linear(config['attention-unit'], config['attention-hops'], bias=False)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax()
        self.dictionary = config['dictionary']
#        self.init_weights()
        self.attention_hops = config['attention-hops']
Project: Structured-Self-Attentive-Sentence-Embedding | Author: ExplorerFreda
def __init__(self, config):
        super(Classifier, self).__init__()
        if config['pooling'] == 'mean' or config['pooling'] == 'max':
            self.encoder = BiLSTM(config)
            self.fc = nn.Linear(config['nhid'] * 2, config['nfc'])
        elif config['pooling'] == 'all':
            self.encoder = SelfAttentiveEncoder(config)
            self.fc = nn.Linear(config['nhid'] * 2 * config['attention-hops'], config['nfc'])
        else:
            raise Exception('Error when initializing Classifier')
        self.drop = nn.Dropout(config['dropout'])
        self.tanh = nn.Tanh()
        self.pred = nn.Linear(config['nfc'], config['class-number'])
        self.dictionary = config['dictionary']
#        self.init_weights()
Project: crnn | Author: wulivicte
def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()

        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        self.embedding = nn.Linear(nHidden * 2, nOut)
Project: YellowFin_Pytorch | Author: JianGoForIt
def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN_LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_classes = num_classes
        self.lstm1 = nn.LSTMCell(input_size, hidden_size)
        self.fc = nn.Linear(hidden_size, num_classes)
Project: YellowFin_Pytorch | Author: JianGoForIt
def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        # self.layer4 = self._make_layer(num_blocks[3], 2)
        self.linear = nn.Linear(cardinality*bottleneck_width*8, num_classes)
Project: YellowFin_Pytorch | Author: JianGoForIt
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Project: pytorch_tutorial | Author: soravux
def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)