Python lasagne.layers module: GlobalPoolLayer() usage examples

The following 9 code examples, extracted from open-source Python projects, illustrate how to use lasagne.layers.GlobalPoolLayer().
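
Before the project examples, here is a minimal orientation sketch (the names x, l_in, and l_pool are ours, not from any project below): GlobalPoolLayer collapses each feature map to a single value, using T.mean by default, so a (batch, channels, height, width) input becomes (batch, channels).

import theano.tensor as T
from lasagne.layers import InputLayer, GlobalPoolLayer, get_output_shape

x = T.tensor4('x')
l_in = InputLayer((None, 16, 32, 32), input_var=x)
l_pool = GlobalPoolLayer(l_in)   # pool_function=T.mean unless overridden
print(get_output_shape(l_pool))  # (None, 16)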

Project: nn-patterns    Author: pikinder
def _invert_GlobalPoolLayer(self, layer, feeder):
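        # Invert a mean GlobalPoolLayer: reshape the pooled feeder back to
        # (batch, channels, 1, 1), upscale it to the original spatial size,
        # then divide by the pool area so the pooled value is redistributed
        # evenly over the positions it was averaged from.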
        assert isinstance(layer, L.GlobalPoolLayer)
        assert layer.pool_function == T.mean
        assert len(L.get_output_shape(layer.input_layer)) == 4

        target_shape = L.get_output_shape(feeder)+(1,1)
        if target_shape[0] is None:
            target_shape = (-1,) + target_shape[1:]

        feeder = L.ReshapeLayer(feeder, target_shape)

        upscaling = L.get_output_shape(layer.input_layer)[2:]
        feeder = L.Upscale2DLayer(feeder, upscaling)

        def expression(x):
            return x / np.prod(upscaling).astype(theano.config.floatX)
        feeder = L.ExpressionLayer(feeder, expression)
        return feeder
Project: experiments    Author: tencia
def build_fcae(input_var, channels=1):
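    # Fully convolutional autoencoder: convolutions and max-pooling encode
    # the input down to a globally pooled code ('enc'); InverseLayers undo
    # the global pool and the max-pooling while fresh convolutions learn
    # the decoding back to a single sigmoid output map.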
    ret = {}
    ret['input'] = layer = InputLayer(shape=(None, channels, None, None), input_var=input_var)
    ret['conv1'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=5, pad='full'))
    ret['pool1'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['conv2'] = layer = bn(Conv2DLayer(layer, num_filters=256, filter_size=3, pad='full'))
    ret['pool2'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['conv3'] = layer = bn(Conv2DLayer(layer, num_filters=32, filter_size=3, pad='full'))
    ret['enc'] = layer = GlobalPoolLayer(layer)
    ret['ph1'] = layer = NonlinearityLayer(layer, nonlinearity=None)
    ret['ph2'] = layer = NonlinearityLayer(layer, nonlinearity=None)
    ret['unenc'] = layer = bn(InverseLayer(layer, ret['enc']))
    ret['deconv3'] = layer = bn(Conv2DLayer(layer, num_filters=256, filter_size=3))
    ret['depool2'] = layer = InverseLayer(layer, ret['pool2'])
    ret['deconv2'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=3))
    ret['depool1'] = layer = InverseLayer(layer, ret['pool1'])
    ret['output'] = layer = Conv2DLayer(layer, num_filters=1, filter_size=5,
                                        nonlinearity=nn.nonlinearities.sigmoid)
    return ret
Project: nn-patterns    Author: pikinder
def _invert_layer(self, layer, feeder):
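        # If the feeder's output shape does not match the layer being
        # inverted, reshape it first, then dispatch to the type-specific
        # inversion method.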
        layer_type = type(layer)

        if L.get_output_shape(feeder) != L.get_output_shape(layer):
            feeder = L.ReshapeLayer(feeder, (-1,)+L.get_output_shape(layer)[1:])
        if layer_type is L.InputLayer:
            return self._invert_InputLayer(layer, feeder)
        elif layer_type is L.FlattenLayer:
            return self._invert_FlattenLayer(layer, feeder)
        elif layer_type is L.DenseLayer:
            return self._invert_DenseLayer(layer, feeder)
        elif layer_type is L.Conv2DLayer:
            return self._invert_Conv2DLayer(layer, feeder)
        elif layer_type is L.DropoutLayer:
            return self._invert_DropoutLayer(layer, feeder)
        elif layer_type in [L.MaxPool2DLayer, L.MaxPool1DLayer]:
            return self._invert_MaxPoolingLayer(layer, feeder)
        elif layer_type is L.PadLayer:
            return self._invert_PadLayer(layer, feeder)
        elif layer_type is L.SliceLayer:
            return self._invert_SliceLayer(layer, feeder)
        elif layer_type is L.LocalResponseNormalization2DLayer:
            return self._invert_LocalResponseNormalisation2DLayer(layer, feeder)
        elif layer_type is L.GlobalPoolLayer:
            return self._invert_GlobalPoolLayer(layer, feeder)
        else:
            return self._invert_UnknownLayer(layer, feeder)
Project: foolbox    Author: bethgelab
def test_lasagne_model(num_classes):
    bounds = (0, 255)
    channels = num_classes

    def mean_brightness_net(images):
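        # Global mean pooling turns each input channel into one logit, so
        # the "logits" are simply per-channel mean brightness values.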
        logits = GlobalPoolLayer(images)
        return logits

    images_var = T.tensor4('images', dtype='float32')
    images = InputLayer((None, channels, 5, 5), images_var)
    logits = mean_brightness_net(images)

    model = LasagneModel(
        images,
        logits,
        bounds=bounds)

    test_images = np.random.rand(2, channels, 5, 5).astype(np.float32)
    test_label = 7

    assert model.batch_predictions(test_images).shape \
        == (2, num_classes)

    test_logits = model.predictions(test_images[0])
    assert test_logits.shape == (num_classes,)

    test_gradient = model.gradient(test_images[0], test_label)
    assert test_gradient.shape == test_images[0].shape

    np.testing.assert_almost_equal(
        model.predictions_and_gradient(test_images[0], test_label)[0],
        test_logits)
    np.testing.assert_almost_equal(
        model.predictions_and_gradient(test_images[0], test_label)[1],
        test_gradient)

    assert model.num_classes() == num_classes
Project: foolbox    Author: bethgelab
def test_lasagne_gradient(num_classes):
    bounds = (0, 255)
    channels = num_classes

    def mean_brightness_net(images):
        logits = GlobalPoolLayer(images)
        return logits

    images_var = T.tensor4('images', dtype='float32')
    images = InputLayer((None, channels, 5, 5), images_var)
    logits = mean_brightness_net(images)

    preprocessing = (np.arange(num_classes)[None, None],
                     np.random.uniform(size=(5, 5, channels)) + 1)

    model = LasagneModel(
        images,
        logits,
        preprocessing=preprocessing,
        bounds=bounds)

    epsilon = 1e-2

    np.random.seed(23)
    test_image = np.random.rand(channels, 5, 5).astype(np.float32)
    test_label = 7

    _, g1 = model.predictions_and_gradient(test_image, test_label)

    l1 = model._loss_fn(test_image[None] - epsilon / 2 * g1, [test_label])[0]
    l2 = model._loss_fn(test_image[None] + epsilon / 2 * g1, [test_label])[0]

    # make sure that gradient is numerically correct
    np.testing.assert_array_almost_equal(
        1e4 * (l2 - l1),
        1e4 * epsilon * np.linalg.norm(g1)**2,
        decimal=1)
Project: foolbox    Author: bethgelab
def test_lasagne_backward(num_classes):
    bounds = (0, 255)
    channels = num_classes

    def mean_brightness_net(images):
        logits = GlobalPoolLayer(images)
        return logits

    images_var = T.tensor4('images', dtype='float32')
    images = InputLayer((None, channels, 5, 5), images_var)
    logits = mean_brightness_net(images)

    model = LasagneModel(
        images,
        logits,
        bounds=bounds)

    test_image = np.random.rand(channels, 5, 5).astype(np.float32)
    test_grad_pre = np.random.rand(num_classes).astype(np.float32)

    test_grad = model.backward(test_grad_pre, test_image)
    assert test_grad.shape == test_image.shape

    manual_grad = np.repeat(np.repeat(
        (test_grad_pre / 25.).reshape((-1, 1, 1)),
        5, axis=1), 5, axis=2)

    np.testing.assert_almost_equal(
        test_grad,
        manual_grad)
Project: MIX-plus-GAN    Author: yz-ignescent
def get_discriminator(self):
        ''' specify discriminator D0 '''
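        # NOTE: the triple-quoted block below is an earlier variant of the
        # discriminator that the authors left disabled; the active network
        # definition starts after the closing quotes.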
        """
        disc0_layers = [LL.InputLayer(shape=(self.args.batch_size, 3, 32, 32))]
        disc0_layers.append(LL.GaussianNoiseLayer(disc0_layers[-1], sigma=0.05))
        disc0_layers.append(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu))) # 16x16
        disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu)))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu))) # 8x8
        disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=0, W=Normal(0.02), nonlinearity=nn.lrelu))) # 6x6
        disc0_layer_shared = LL.NINLayer(disc0_layers[-1], num_units=192, W=Normal(0.02), nonlinearity=nn.lrelu) # 6x6
        disc0_layers.append(disc0_layer_shared)

        disc0_layer_z_recon = LL.DenseLayer(disc0_layer_shared, num_units=50, W=Normal(0.02), nonlinearity=None)
        disc0_layers.append(disc0_layer_z_recon) # also need to recover z from x

        disc0_layers.append(LL.GlobalPoolLayer(disc0_layer_shared))
        disc0_layer_adv = LL.DenseLayer(disc0_layers[-1], num_units=10, W=Normal(0.02), nonlinearity=None)
        disc0_layers.append(disc0_layer_adv)

        return disc0_layers, disc0_layer_adv, disc0_layer_z_recon
        """
        disc_x_layers = [LL.InputLayer(shape=(None, 3, 32, 32))]
        disc_x_layers.append(LL.GaussianNoiseLayer(disc_x_layers[-1], sigma=0.2))
        disc_x_layers.append(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=0, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers_shared = LL.NINLayer(disc_x_layers[-1], num_units=192, W=Normal(0.01), nonlinearity=nn.lrelu)
        disc_x_layers.append(disc_x_layers_shared)

        disc_x_layer_z_recon = LL.DenseLayer(disc_x_layers_shared, num_units=self.args.z0dim, nonlinearity=None)
        disc_x_layers.append(disc_x_layer_z_recon) # also need to recover z from x

        # disc_x_layers.append(nn.MinibatchLayer(disc_x_layers_shared, num_kernels=100))
        disc_x_layers.append(LL.GlobalPoolLayer(disc_x_layers_shared))
        disc_x_layer_adv = LL.DenseLayer(disc_x_layers[-1], num_units=10, W=Normal(0.01), nonlinearity=None)
        disc_x_layers.append(disc_x_layer_adv)

        #output_before_softmax_x = LL.get_output(disc_x_layer_adv, x, deterministic=False)
        #output_before_softmax_gen = LL.get_output(disc_x_layer_adv, gen_x, deterministic=False)

        # temp = LL.get_output(gen_x_layers[-1], deterministic=False, init=True)
        # temp = LL.get_output(disc_x_layers[-1], x, deterministic=False, init=True)
        # init_updates = [u for l in LL.get_all_layers(gen_x_layers)+LL.get_all_layers(disc_x_layers) for u in getattr(l,'init_updates',[])]
        return disc_x_layers, disc_x_layer_adv, disc_x_layer_z_recon
Project: triple-gan    Author: zhenxuan00
def build_network():
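    # Conv net for 32x32x3 inputs: three conv blocks with max-pooling and
    # dropout, NIN layers, global pooling, and a 10-way softmax head. WN
    # wraps every weighted layer (presumably weight normalization, going
    # by the name and the momentum setting in wn_defs).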
    conv_defs = {
        'W': lasagne.init.HeNormal('relu'),
        'b': lasagne.init.Constant(0.0),
        'filter_size': (3, 3),
        'stride': (1, 1),
        'nonlinearity': lasagne.nonlinearities.LeakyRectify(0.1)
    }

    nin_defs = {
        'W': lasagne.init.HeNormal('relu'),
        'b': lasagne.init.Constant(0.0),
        'nonlinearity': lasagne.nonlinearities.LeakyRectify(0.1)
    }

    dense_defs = {
        'W': lasagne.init.HeNormal(1.0),
        'b': lasagne.init.Constant(0.0),
        'nonlinearity': lasagne.nonlinearities.softmax
    }

    wn_defs = {
        'momentum': .999
    }

    net = InputLayer        (     name='input',    shape=(None, 3, 32, 32))
    net = GaussianNoiseLayer(net, name='noise',    sigma=.15)
    net = WN(Conv2DLayer    (net, name='conv1a',   num_filters=128, pad='same', **conv_defs), **wn_defs)
    net = WN(Conv2DLayer    (net, name='conv1b',   num_filters=128, pad='same', **conv_defs), **wn_defs)
    net = WN(Conv2DLayer    (net, name='conv1c',   num_filters=128, pad='same', **conv_defs), **wn_defs)
    net = MaxPool2DLayer    (net, name='pool1',    pool_size=(2, 2))
    net = DropoutLayer      (net, name='drop1',    p=.5)
    net = WN(Conv2DLayer    (net, name='conv2a',   num_filters=256, pad='same', **conv_defs), **wn_defs)
    net = WN(Conv2DLayer    (net, name='conv2b',   num_filters=256, pad='same', **conv_defs), **wn_defs)
    net = WN(Conv2DLayer    (net, name='conv2c',   num_filters=256, pad='same', **conv_defs), **wn_defs)
    net = MaxPool2DLayer    (net, name='pool2',    pool_size=(2, 2))
    net = DropoutLayer      (net, name='drop2',    p=.5)
    net = WN(Conv2DLayer    (net, name='conv3a',   num_filters=512, pad=0,      **conv_defs), **wn_defs)
    net = WN(NINLayer       (net, name='conv3b',   num_units=256,               **nin_defs),  **wn_defs)
    net = WN(NINLayer       (net, name='conv3c',   num_units=128,               **nin_defs),  **wn_defs)
    net = GlobalPoolLayer   (net, name='pool3')
    net = WN(DenseLayer     (net, name='dense',    num_units=10,       **dense_defs), **wn_defs)

    return net
Project: kaggle-dsg-qualification    Author: Ignotus
def build_model(self, input_var, forward, dropout):
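        # GoogLeNet-style architecture: stem convolutions, stacked
        # inception modules, then GlobalPoolLayer in place of the final
        # fully connected layers, followed by a 4-class classifier head.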
        net = dict()
        net['input'] = InputLayer((None, 3, None, None), input_var=input_var)
        net['conv1/7x7_s2'] = ConvLayer(
            net['input'], 64, 7, stride=2, pad=3, flip_filters=False)
        net['pool1/3x3_s2'] = PoolLayer(
            net['conv1/7x7_s2'], pool_size=3, stride=2, ignore_border=False)
        net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
        net['conv2/3x3_reduce'] = ConvLayer(
            net['pool1/norm1'], 64, 1, flip_filters=False)
        net['conv2/3x3'] = ConvLayer(
            net['conv2/3x3_reduce'], 192, 3, pad=1, flip_filters=False)
        net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
        net['pool2/3x3_s2'] = PoolLayerDNN(net['conv2/norm2'], pool_size=3, stride=2)

        net.update(self.build_inception_module('inception_3a',
                                               net['pool2/3x3_s2'],
                                               [32, 64, 96, 128, 16, 32]))
        net.update(self.build_inception_module('inception_3b',
                                               net['inception_3a/output'],
                                               [64, 128, 128, 192, 32, 96]))
        net['pool3/3x3_s2'] = PoolLayerDNN(net['inception_3b/output'],
                                           pool_size=3, stride=2)

        net.update(self.build_inception_module('inception_4a',
                                               net['pool3/3x3_s2'],
                                               [64, 192, 96, 208, 16, 48]))
        net.update(self.build_inception_module('inception_4b',
                                               net['inception_4a/output'],
                                               [64, 160, 112, 224, 24, 64]))
        net.update(self.build_inception_module('inception_4c',
                                               net['inception_4b/output'],
                                               [64, 128, 128, 256, 24, 64]))
        net.update(self.build_inception_module('inception_4d',
                                               net['inception_4c/output'],
                                               [64, 112, 144, 288, 32, 64]))
        net.update(self.build_inception_module('inception_4e',
                                               net['inception_4d/output'],
                                               [128, 256, 160, 320, 32, 128]))
        net['pool4/3x3_s2'] = PoolLayerDNN(net['inception_4e/output'],
                                           pool_size=3, stride=2)

        net.update(self.build_inception_module('inception_5a',
                                               net['pool4/3x3_s2'],
                                               [128, 256, 160, 320, 32, 128]))
        net.update(self.build_inception_module('inception_5b',
                                               net['inception_5a/output'],
                                               [128, 384, 192, 384, 48, 128]))

        net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])

        if forward:
            #net['fc6'] = DenseLayer(net['pool5/7x7_s1'], num_units=1000)
            net['prob'] = DenseLayer(net['pool5/7x7_s1'], num_units=4, nonlinearity=softmax)
        else:
            net['dropout1'] = DropoutLayer(net['pool5/7x7_s1'], p=dropout)
            #net['fc6'] = DenseLayer(net['dropout1'], num_units=1000)
            #net['dropout2'] = DropoutLayer(net['fc6'], p=dropout)
            net['prob'] = DenseLayer(net['dropout1'], num_units=4, nonlinearity=softmax)
        return net