Python lasagne.layers module: get_output_shape() code examples

The following 16 code examples, extracted from open-source Python projects, illustrate how to use lasagne.layers.get_output_shape().
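Before the project examples, here is a minimal sketch of the basic API; the toy network below is hypothetical, built only for illustration. get_output_shape() propagates shapes symbolically through the layer graph, so it reports the output shape without compiling a Theano function or feeding any data.

import lasagne.layers as L

# Toy network: (batch, channels, rows, cols) input, one conv, one pool.
net = L.InputLayer((None, 3, 32, 32))
net = L.Conv2DLayer(net, num_filters=16, filter_size=3, pad='same')
net = L.MaxPool2DLayer(net, pool_size=2)

# Shape inference is symbolic; None marks the unknown batch dimension.
print(L.get_output_shape(net))  # (None, 16, 16, 16)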

Project: nn-patterns | Author: pikinder
def _invert_GlobalPoolLayer(self, layer, feeder):
        assert isinstance(layer, L.GlobalPoolLayer)
        assert layer.pool_function == T.mean
        assert len(L.get_output_shape(layer.input_layer)) == 4

        target_shape = L.get_output_shape(feeder) + (1, 1)
        if target_shape[0] is None:
            target_shape = (-1,) + target_shape[1:]

        feeder = L.ReshapeLayer(feeder, target_shape)

        upscaling = L.get_output_shape(layer.input_layer)[2:]
        feeder = L.Upscale2DLayer(feeder, upscaling)

        def expression(x):
            return x / np.prod(upscaling).astype(theano.config.floatX)
        feeder = L.ExpressionLayer(feeder, expression)
        return feeder
Project: crfrnn_layer | Author: HapeMask
def __init__(self, values, ref_img, sxy=60, sc=10, norm_type="sym",
                 name=None):

        C = ll.get_output_shape(ref_img)[1]
        if C not in [1, 3]:
            raise ValueError("Bilateral filtering requires a color or \
greyscale reference image. Got %d channels." % C)

        if C == 1:
            kern_std = np.array([sxy, sxy, sc], np.float32)
        else:
            kern_std = np.array([sxy, sxy, sc, sc, sc], np.float32)

        super(BilateralFilterLayer, self).__init__(values, ref_img, kern_std,
                                                   norm_type, name=name,
                                                   _bilateral=True)
Project: doublecnn | Author: Shuangfei
def build_vis(self, l, gamma, lr):
        conv_layer = self.conv_layers[l]
        nonlinearity = conv_layer.nonlinearity
        conv_layer.nonlinearity = lasagne.nonlinearities.identity
        output_shape = layers.get_output_shape(conv_layer)
        self.x_shared = theano.shared(numpy.zeros((output_shape[1], self.n_visible)).astype('float32'))
        conv_out = layers.get_output(conv_layer, inputs=self.x_shared, deterministic=True)
        idx = output_shape[2] // 2  # integer index of the central spatial position
        cost = -T.sum(conv_out[:, :, idx, idx].diagonal()) + \
               gamma * T.sum(self.x_shared**2)
        updates = lasagne.updates.adadelta(cost, [self.x_shared], learning_rate=lr)
        fn = {}
        fn['train'] = theano.function([], cost, updates=updates)

        conv_layer.nonlinearity = nonlinearity

        return fn
Project: nn-patterns | Author: pikinder
def _invert_DenseLayer(self, layer, feeder):
        # Note: the roles of layer and feeder are swapped here.
        feeder = self._put_rectifiers(feeder, layer)
        feeder = self._get_normalised_relevance_layer(layer, feeder)

        output_units = np.prod(L.get_output_shape(layer.input_layer)[1:])
        output_layer = L.DenseLayer(feeder, num_units=output_units)
        W = output_layer.W
        tmp_shape = np.asarray((-1,) + L.get_output_shape(output_layer)[1:])
        x_layer = L.ReshapeLayer(layer.input_layer, tmp_shape.tolist())
        output_layer = L.ElemwiseMergeLayer(incomings=[x_layer, output_layer],
                                            merge_function=T.mul)
        output_layer.W = W
        return output_layer
Project: nn-patterns | Author: pikinder
def _invert_DenseLayer(self, layer, feeder):
        # Note: the roles of layer and feeder are swapped here.
        feeder = self._put_rectifiers(feeder, layer)
        output_units = np.prod(L.get_output_shape(layer.input_layer)[1:])
        output_layer = L.DenseLayer(feeder,
                                    num_units=output_units,
                                    nonlinearity=None, b=None)
        return output_layer
Project: nn-patterns | Author: pikinder
def _invert_layer(self, layer, feeder):
        layer_type = type(layer)

        if L.get_output_shape(feeder) != L.get_output_shape(layer):
            feeder = L.ReshapeLayer(feeder, (-1,) + L.get_output_shape(layer)[1:])
        if layer_type is L.InputLayer:
            return self._invert_InputLayer(layer, feeder)
        elif layer_type is L.FlattenLayer:
            return self._invert_FlattenLayer(layer, feeder)
        elif layer_type is L.DenseLayer:
            return self._invert_DenseLayer(layer, feeder)
        elif layer_type is L.Conv2DLayer:
            return self._invert_Conv2DLayer(layer, feeder)
        elif layer_type is L.DropoutLayer:
            return self._invert_DropoutLayer(layer, feeder)
        elif layer_type in [L.MaxPool2DLayer, L.MaxPool1DLayer]:
            return self._invert_MaxPoolingLayer(layer, feeder)
        elif layer_type is L.PadLayer:
            return self._invert_PadLayer(layer, feeder)
        elif layer_type is L.SliceLayer:
            return self._invert_SliceLayer(layer, feeder)
        elif layer_type is L.LocalResponseNormalization2DLayer:
            return self._invert_LocalResponseNormalisation2DLayer(layer, feeder)
        elif layer_type is L.GlobalPoolLayer:
            return self._invert_GlobalPoolLayer(layer, feeder)
        else:
            return self._invert_UnknownLayer(layer, feeder)
Project: nn-patterns | Author: pikinder
def _init_network(self, patterns=None, **kwargs):
        self._remove_softmax()
        self.relevance_values = T.matrix()
        self._construct_layer_maps()
        tmp = self._invert_layer_recursion(self.input_layer, None)
        self.explain_output_layer = tmp

        # Call in any case. Patterns are not always needed.
        self._set_inverse_parameters(patterns=patterns)
        #print("\n\n\nNetwork")
        #for l in get_all_layers(self.explain_output_layer):
        #    print(type(l), get_output_shape(l))
        #print("\n\n\n")
Project: nn-patterns | Author: pikinder
def get_conv_xy(layer, deterministic=True):
    w_np = layer.W.get_value()
    input_layer = layer.input_layer
    if layer.pad == 'same':
        input_layer = L.PadLayer(layer.input_layer,
                                 width=np.array(w_np.shape[2:]) // 2,
                                 batch_ndim=2)
    input_shape = L.get_output_shape(input_layer)
    max_x = input_shape[2] - w_np.shape[2]
    max_y = input_shape[3] - w_np.shape[3]
    srng = RandomStreams()
    patch_x = srng.random_integers(low=0, high=max_x)
    patch_y = srng.random_integers(low=0, high=max_y)

    #print("input_shape shape: ", input_shape)
    #print("pad: \"%s\""% (layer.pad,))
    #print(" stride: " ,layer.stride)
    #print("max_x %d max_y %d"%(max_x,max_y))

    x = L.get_output(input_layer, deterministic=deterministic)
    x = x[:, :,
          patch_x:patch_x + w_np.shape[2], patch_y:patch_y + w_np.shape[3]]
    x = T.flatten(x, 2)  # N,D

    w = layer.W
    if layer.flip_filters:
        w = w[:, :, ::-1, ::-1]
    w = T.flatten(w, outdim=2).T  # D,O
    y = T.dot(x, w) # N,O
    if layer.b is not None:
        y += T.shape_padaxis(layer.b, axis=0)
    return x, y
Project: nn-patterns | Author: pikinder
def get_conv_xy_all(layer, deterministic=True):
    w_np = layer.W.get_value()
    w = layer.W
    if layer.flip_filters:
        w = w[:, :, ::-1, ::-1]

    input_layer = layer.input_layer
    if layer.pad == 'same':
        input_layer = L.PadLayer(layer.input_layer,
                                 width=np.array(w_np.shape[2:])//2,
                                 batch_ndim=2)
    input_shape = L.get_output_shape(input_layer)
    output_shape = L.get_output_shape(layer)
    max_x = input_shape[2] - w_np.shape[2] + 1
    max_y = input_shape[3] - w_np.shape[3] + 1
    #print("input_shape shape: ", input_shape)
    #print("output_shape shape: ", output_shape,np.prod(output_shape[2:]))
    #print("pad: \"%s\""%layer.pad)
    #print(" stride: " ,layer.stride)
    #print("max_x %d max_y %d"%(max_x,max_y))
    x_orig = L.get_output(input_layer, deterministic=deterministic)

    x = theano.tensor.nnet.neighbours.images2neibs(x_orig,
                                                   neib_shape=layer.filter_size,
                                                   neib_step=layer.stride,
                                                   mode='valid')
    x = T.reshape(x, (x_orig.shape[0], -1,
                      np.prod(output_shape[2:]), np.prod(w_np.shape[2:])))
    x = T.transpose(x, (0, 2, 1, 3))
    x = T.reshape(x, (-1, T.prod(x.shape[2:])))

    w = T.flatten(w, outdim=2).T  # D,O
    y = T.dot(x, w) # N,O
    if layer.b is not None:
        y += T.shape_padaxis(layer.b, axis=0)
    return x, y
Project: crfrnn_layer | Author: HapeMask
def __init__(self, values, ref_img, kern_std, norm_type="sym",
                 name=None, trainable_kernels=False, _bilateral=False):

        assert norm_type in ["sym", "pre", "post", None]
        super(GaussianFilterLayer, self).__init__(incomings=[values, ref_img],
                                                  name=name)

        self.val_dim = ll.get_output_shape(values)[1]
        self.ref_dim = ll.get_output_shape(ref_img)[1]

        if None in (self.val_dim, self.ref_dim):
            raise ValueError("Gaussian filtering requires known channel \
dimensions for all inputs.")

        self.norm_type = norm_type

        if _bilateral:
            self.ref_dim += 2

        if len(kern_std) != self.ref_dim:
            raise ValueError("Number of kernel weights must match reference \
dimensionality. Got %d weights for %d reference dims." % (len(kern_std),
                                                          self.ref_dim))

        self.kern_std = self.add_param(kern_std, (self.ref_dim,),
                                       name="kern_std",
                                       trainable=trainable_kernels,
                                       regularizable=False)
Project: nn-patterns | Author: pikinder
def _invert_Conv2DLayer(self, layer, feeder):
        # Note: the roles of layer and feeder are swapped here.
        feeder = self._put_rectifiers(feeder, layer)
        feeder = self._get_normalised_relevance_layer(layer, feeder)

        f_s = layer.filter_size
        if layer.pad == 'same':
            pad = 'same'
        elif layer.pad == 'valid' or layer.pad == (0, 0):
            pad = 'full'
        else:
            raise RuntimeError("Define your padding as full or same.")

        # By definition, flip_filters must be enabled for this to be a proper deconvolution.
        num_filters = L.get_output_shape(layer.input_layer)[1]
        if layer.stride == (4, 4):
            # TODO: similar code exists in the gradient-based explainers; merge them.
            feeder = L.Upscale2DLayer(feeder, layer.stride, mode='dilate')
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1,
                                         pad=pad,
                                         nonlinearity=None,
                                         b=None,
                                         flip_filters=True)
            conv_layer = output_layer
            tmp = L.SliceLayer(output_layer, slice(0, -3), axis=3)
            output_layer = L.SliceLayer(tmp, slice(0, -3), axis=2)
            output_layer.W = conv_layer.W
        else:
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1,
                                         pad=pad,
                                         nonlinearity=None,
                                         b=None,
                                         flip_filters=True)
        W = output_layer.W

        # Do the multiplication.
        x_layer = L.ReshapeLayer(layer.input_layer,
                                 (-1,) + L.get_output_shape(output_layer)[1:])
        output_layer = L.ElemwiseMergeLayer(incomings=[x_layer, output_layer],
                                            merge_function=T.mul)
        output_layer.W = W
        return output_layer
Project: nn-patterns | Author: pikinder
def _invert_Conv2DLayer(self, layer, feeder):
        def _check_padding_same():
            for s, p in zip(layer.filter_size, layer.pad):
                if s % 2 != 1:
                    return False
                elif s//2 != p:
                    return False
            return True

        # Note: the roles of layer and feeder are swapped here.
        feeder = self._put_rectifiers(feeder, layer)

        f_s = layer.filter_size
        if layer.pad == 'same' or _check_padding_same():
            pad = 'same'
        elif layer.pad == 'valid' or layer.pad == (0, 0):
            pad = 'full'
        else:
            raise RuntimeError("Define your padding as full or same.")

        # By definition, flip_filters must be enabled for this to be a proper deconvolution.

        num_filters = L.get_output_shape(layer.input_layer)[1]
        if layer.stride == (4, 4):
            # Todo: clean this!
            print("Applying alexnet hack.")
            feeder = L.Upscale2DLayer(feeder, layer.stride, mode='dilate')
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1, pad=pad,
                                         nonlinearity=None, b=None,
                                         flip_filters=True)
            print("Applying alexnet hack part 2.")
            conv_layer = output_layer
            output_layer = L.SliceLayer(L.SliceLayer(output_layer,
                                                     slice(0, -3), axis=3),
                                        slice(0, -3), axis=2)
            output_layer.W = conv_layer.W
        elif layer.stride == (2, 2):
            # Todo: clean this! Seems to be the same code as for AlexNet above.
            print("Applying GoogLeNet hack.")
            feeder = L.Upscale2DLayer(feeder, layer.stride, mode='dilate')
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1, pad=pad,
                                         nonlinearity=None, b=None,
                                         flip_filters=True)
        else:
            # Todo: clean this. Repetitions all over.
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1, pad=pad,
                                         nonlinearity=None, b=None,
                                         flip_filters=True)
        return output_layer
Project: crfrnn_layer | Author: HapeMask
def __init__(self, unary, ref, sxy_bf=70, sc_bf=10, compat_bf=6,
                 sxy_spatial=2, compat_spatial=2, num_iter=5,
                 normalize_final_iter=True, trainable_kernels=False,
                 name=None):

        super(CRFasRNNLayer, self).__init__(incomings=[unary, ref], name=name)

        self.sxy_bf = sxy_bf
        self.sc_bf = sc_bf
        self.compat_bf = compat_bf
        self.sxy_spatial = sxy_spatial
        self.compat_spatial = compat_spatial
        self.num_iter = num_iter
        self.normalize_final_iter = normalize_final_iter

        if ll.get_output_shape(ref)[1] not in [1, 3]:
            raise ValueError("Reference image must be either color or "
                             "greyscale (1 or 3 channels).")

        self.val_dim = ll.get_output_shape(unary)[1]
        # +2 for the x/y coordinates of the bilateral grid
        self.ref_dim = ll.get_output_shape(ref)[1] + 2

        # Check this before building the kernels below, which need concrete
        # channel dimensions.
        if None in (self.val_dim, self.ref_dim):
            raise ValueError("CRF RNN requires known channel dimensions for "
                             "all inputs.")

        if self.ref_dim == 5:
            kstd_bf = np.array([sxy_bf, sxy_bf, sc_bf, sc_bf, sc_bf],
                               np.float32)
        else:
            kstd_bf = np.array([sxy_bf, sxy_bf, sc_bf], np.float32)

        self.kstd_bf = self.add_param(kstd_bf, (self.ref_dim,),
                                      name="kern_std",
                                      trainable=trainable_kernels,
                                      regularizable=False)

        gk = gkern(sxy_spatial, self.val_dim)
        self.W_spatial = self.add_param(gk, gk.shape, name="spatial_kernel",
                                        trainable=trainable_kernels,
                                        regularizable=False)
Project: BirdCLEF2017 | Author: kahst
def buildModel(mtype=1):

    print "BUILDING MODEL TYPE", mtype, "..."

    #default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net) 

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)  
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)  

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net
Project: BirdCLEF2017 | Author: kahst
def buildModel(mtype=1):

    print "BUILDING MODEL TYPE", mtype, "..."

    #default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net) 

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net
Project: AcousticEventDetection | Author: kahst
def buildModel():

    print "BUILDING MODEL TYPE..."

    #default settings
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net) 

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)  
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)  

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net