Python lasagne.layers module: Conv2DLayer() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use lasagne.layers.Conv2DLayer().
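
As a quick orientation before the project samples, here is a minimal, self-contained sketch of the basic call. It is not taken from any of the projects below; the shapes and filter counts are purely illustrative.

import numpy as np
import theano
import theano.tensor as T
import lasagne
from lasagne.layers import InputLayer, Conv2DLayer, get_output

input_var = T.tensor4('inputs')  # (batch, channels, rows, cols)
l_in = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
l_conv = Conv2DLayer(l_in, num_filters=16, filter_size=(3, 3), stride=1,
                     pad='same', nonlinearity=lasagne.nonlinearities.rectify)

# Compile and run on random data; with pad='same' the spatial size is
# preserved, so the output shape is (2, 16, 32, 32).
fn = theano.function([input_var], get_output(l_conv))
print(fn(np.random.rand(2, 3, 32, 32).astype(theano.config.floatX)).shape)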

Project: nn-patterns    Author: pikinder    | Project source | File source
def _set_inverse_parameters(self, patterns=None):
        self.trainable_layers = [self.inverse_map[l]
                                 for l in L.get_all_layers(self.output_layer)
                                 if type(l) in [L.Conv2DLayer, L.DenseLayer]]
        if patterns is not None:
            if type(patterns) is list:
                patterns = patterns[0]
            for i,layer in enumerate(self.trainable_layers):
                pattern = patterns['A'][i]
                if pattern.ndim == 4:
                    pattern = pattern.transpose(1,0,2,3)
                elif pattern.ndim == 2:
                    pattern = pattern.T
                layer.W.set_value(pattern)
        else:
            print("Patterns not given, explanation is random.")
Project: supic    Author: Hirico    | Project source | File source
def setup_generator(self, input, config):
        for k, v in config.items(): setattr(args, k, v)
        args.zoom = 2**(args.generator_upscale - args.generator_downscale)

        units_iter = extend(args.generator_filters)
        units = next(units_iter)
        self.make_layer('iter.0', input, units, filter_size=(7,7), pad=(3,3))

        for i in range(0, args.generator_downscale):
            self.make_layer('downscale%i'%i, self.last_layer(), next(units_iter), filter_size=(4,4), stride=(2,2))

        units = next(units_iter)
        for i in range(0, args.generator_blocks):
            self.make_block('iter.%i'%(i+1), self.last_layer(), units)

        for i in range(0, args.generator_upscale):
            u = next(units_iter)
            self.make_layer('upscale%i.2'%i, self.last_layer(), u*4)
            self.network['upscale%i.1'%i] = SubpixelReshuffleLayer(self.last_layer(), u, 2)

        self.network['out'] = ConvLayer(self.last_layer(), 3, filter_size=(7,7), pad=(3,3), nonlinearity=None)
Project: supic    Author: Hirico    | Project source | File source
def setup_discriminator(self):
        c = args.discriminator_size
        self.make_layer('disc1.1', batch_norm(self.network['conv1_2']), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc1.2', self.last_layer(), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc2', batch_norm(self.network['conv2_2']), 2*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc3', batch_norm(self.network['conv3_2']), 3*c, filter_size=(3,3), stride=(1,1), pad=(1,1))
        hypercolumn = ConcatLayer([self.network['disc1.2>'], self.network['disc2>'], self.network['disc3>']])
        self.make_layer('disc4', hypercolumn, 4*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.make_layer('disc5', self.last_layer(), 3*c, filter_size=(3,3), stride=(2,2))
        self.make_layer('disc6', self.last_layer(), 2*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.network['disc'] = batch_norm(ConvLayer(self.last_layer(), 1, filter_size=(1,1),
                                                    nonlinearity=lasagne.nonlinearities.linear))


    #------------------------------------------------------------------------------------------------------------------
    # Input / Output
    #------------------------------------------------------------------------------------------------------------------
Project: experiments    Author: tencia    | Project source | File source
def build_fcae(input_var, channels=1):
    ret = {}
    ret['input'] = layer = InputLayer(shape=(None, channels, None, None), input_var=input_var)
    ret['conv1'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=5, pad='full'))
    ret['pool1'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['conv2'] = layer = bn(Conv2DLayer(layer, num_filters=256, filter_size=3, pad='full'))
    ret['pool2'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['conv3'] = layer = bn(Conv2DLayer(layer, num_filters=32, filter_size=3, pad='full'))
    ret['enc'] = layer = GlobalPoolLayer(layer)
    ret['ph1'] = layer = NonlinearityLayer(layer, nonlinearity=None)
    ret['ph2'] = layer = NonlinearityLayer(layer, nonlinearity=None)
    ret['unenc'] = layer = bn(InverseLayer(layer, ret['enc']))
    ret['deconv3'] = layer = bn(Conv2DLayer(layer, num_filters=256, filter_size=3))
    ret['depool2'] = layer = InverseLayer(layer, ret['pool2'])
    ret['deconv2'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=3))
    ret['depool1'] = layer = InverseLayer(layer, ret['pool1'])
    ret['output'] = layer = Conv2DLayer(layer, num_filters=1, filter_size=5,
                                     nonlinearity=nn.nonlinearities.sigmoid)
    return ret
Project: DeepRes    Author: Aneeshers    | Project source | File source
def setup_generator(self, input, config):
        for k, v in config.items(): setattr(args, k, v)
        args.zoom = 2**(args.generator_upscale - args.generator_downscale)

        units_iter = extend(args.generator_filters)
        units = next(units_iter)
        self.make_layer('iter.0', input, units, filter_size=(7,7), pad=(3,3))

        for i in range(0, args.generator_downscale):
            self.make_layer('downscale%i'%i, self.last_layer(), next(units_iter), filter_size=(4,4), stride=(2,2))

        units = next(units_iter)
        for i in range(0, args.generator_blocks):
            self.make_block('iter.%i'%(i+1), self.last_layer(), units)

        for i in range(0, args.generator_upscale):
            u = next(units_iter)
            self.make_layer('upscale%i.2'%i, self.last_layer(), u*4)
            self.network['upscale%i.1'%i] = SubpixelReshuffleLayer(self.last_layer(), u, 2)

        self.network['out'] = ConvLayer(self.last_layer(), 3, filter_size=(7,7), pad=(3,3), nonlinearity=None)
Project: DeepRes    Author: Aneeshers    | Project source | File source
def setup_discriminator(self):
        c = args.discriminator_size
        self.make_layer('disc1.1', batch_norm(self.network['conv1_2']), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc1.2', self.last_layer(), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc2', batch_norm(self.network['conv2_2']), 2*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc3', batch_norm(self.network['conv3_2']), 3*c, filter_size=(3,3), stride=(1,1), pad=(1,1))
        hypercolumn = ConcatLayer([self.network['disc1.2>'], self.network['disc2>'], self.network['disc3>']])
        self.make_layer('disc4', hypercolumn, 4*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.make_layer('disc5', self.last_layer(), 3*c, filter_size=(3,3), stride=(2,2))
        self.make_layer('disc6', self.last_layer(), 2*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.network['disc'] = batch_norm(ConvLayer(self.last_layer(), 1, filter_size=(1,1),
                                                    nonlinearity=lasagne.nonlinearities.linear))


    #------------------------------------------------------------------------------------------------------------------
    #------------------------------------------------------------------------------------------------------------------
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | Project source | File source
def __build_48_net__(self):
        network = layers.InputLayer((None, 3, 48, 48), input_var=self.__input_var__)

        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5,5), stride=1, nonlinearity=relu)
        network = layers.MaxPool2DLayer(network, pool_size=(3,3), stride=2)
        network = layers.batch_norm(network)

        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5,5), stride=1, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3,3), stride=2)

        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(3,3), stride=1, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3,3), stride=2)

        network = layers.DenseLayer(network, num_units=256, nonlinearity=relu)
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
Project: neural-enhance    Author: alexjc    | Project source | File source
def setup_generator(self, input, config):
        for k, v in config.items(): setattr(args, k, v)
        args.zoom = 2**(args.generator_upscale - args.generator_downscale)

        units_iter = extend(args.generator_filters)
        units = next(units_iter)
        self.make_layer('iter.0', input, units, filter_size=(7,7), pad=(3,3))

        for i in range(0, args.generator_downscale):
            self.make_layer('downscale%i'%i, self.last_layer(), next(units_iter), filter_size=(4,4), stride=(2,2))

        units = next(units_iter)
        for i in range(0, args.generator_blocks):
            self.make_block('iter.%i'%(i+1), self.last_layer(), units)

        for i in range(0, args.generator_upscale):
            u = next(units_iter)
            self.make_layer('upscale%i.2'%i, self.last_layer(), u*4)
            self.network['upscale%i.1'%i] = SubpixelReshuffleLayer(self.last_layer(), u, 2)

        self.network['out'] = ConvLayer(self.last_layer(), 3, filter_size=(7,7), pad=(3,3), nonlinearity=None)
Project: neural-enhance    Author: alexjc    | Project source | File source
def setup_discriminator(self):
        c = args.discriminator_size
        self.make_layer('disc1.1', batch_norm(self.network['conv1_2']), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc1.2', self.last_layer(), 1*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc2', batch_norm(self.network['conv2_2']), 2*c, filter_size=(5,5), stride=(2,2), pad=(2,2))
        self.make_layer('disc3', batch_norm(self.network['conv3_2']), 3*c, filter_size=(3,3), stride=(1,1), pad=(1,1))
        hypercolumn = ConcatLayer([self.network['disc1.2>'], self.network['disc2>'], self.network['disc3>']])
        self.make_layer('disc4', hypercolumn, 4*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.make_layer('disc5', self.last_layer(), 3*c, filter_size=(3,3), stride=(2,2))
        self.make_layer('disc6', self.last_layer(), 2*c, filter_size=(1,1), stride=(1,1), pad=(0,0))
        self.network['disc'] = batch_norm(ConvLayer(self.last_layer(), 1, filter_size=(1,1),
                                                    nonlinearity=lasagne.nonlinearities.linear))


    #------------------------------------------------------------------------------------------------------------------
    # Input / Output
    #------------------------------------------------------------------------------------------------------------------
Project: adda_mnist64    Author: davidtellez    | Project source | File source
def network_classifier(self, input_var):

        network = {}
        network['classifier/input'] = InputLayer(shape=(None, 3, 64, 64), input_var=input_var, name='classifier/input')
        network['classifier/conv1'] = Conv2DLayer(network['classifier/input'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv1')
        network['classifier/pool1'] = MaxPool2DLayer(network['classifier/conv1'], pool_size=2, stride=2, pad=0, name='classifier/pool1')
        network['classifier/conv2'] = Conv2DLayer(network['classifier/pool1'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv2')
        network['classifier/pool2'] = MaxPool2DLayer(network['classifier/conv2'], pool_size=2, stride=2, pad=0, name='classifier/pool2')
        network['classifier/conv3'] = Conv2DLayer(network['classifier/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv3')
        network['classifier/pool3'] = MaxPool2DLayer(network['classifier/conv3'], pool_size=2, stride=2, pad=0, name='classifier/pool3')
        network['classifier/conv4'] = Conv2DLayer(network['classifier/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv4')
        network['classifier/pool4'] = MaxPool2DLayer(network['classifier/conv4'], pool_size=2, stride=2, pad=0, name='classifier/pool4')
        network['classifier/dense1'] = DenseLayer(network['classifier/pool4'], num_units=64, nonlinearity=rectify, name='classifier/dense1')
        network['classifier/output'] = DenseLayer(network['classifier/dense1'], num_units=10, nonlinearity=softmax, name='classifier/output')

        return network
Project: ConvolutionalAutoEncoder    Author: ToniCreswell    | Project source | File source
def build_net(nz=10):
    # nz = size of latent code
    # N.B. batch_norm applies BN before the non-linearity!
    F = 32
    enc = InputLayer(shape=(None, 1, 28, 28))
    enc = Conv2DLayer(incoming=enc, num_filters=F*2, filter_size=5, stride=2, nonlinearity=lrelu(0.2), pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F*4, filter_size=5, stride=2, nonlinearity=lrelu(0.2), pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F*4, filter_size=5, stride=1, nonlinearity=lrelu(0.2), pad=2)
    enc = reshape(incoming=enc, shape=(-1, F*4*7*7))
    enc = DenseLayer(incoming=enc, num_units=nz, nonlinearity=sigmoid)
    # Generator (decoder) network
    dec = InputLayer(shape=(None, nz))
    dec = DenseLayer(incoming=dec, num_units=F*4*7*7)
    dec = reshape(incoming=dec, shape=(-1, F*4, 7, 7))
    dec = Deconv2DLayer(incoming=dec, num_filters=F*4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=F*4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=1, filter_size=3, stride=1, nonlinearity=sigmoid, crop=1)

    return enc, dec
Project: aenet    Author: znaoya    | Project source | File source
def build_model(self):
        '''
        Build Acoustic Event Net model
        :return:
        '''

        # 'A' architecture, 41 classes
        nonlin = lasagne.nonlinearities.rectify
        net = {}
        net['input'] = InputLayer((None, feat_shape[0], feat_shape[1], feat_shape[2]))  # (channel, time, frequency)
        # ----------- 1st layer group ---------------
        net['conv1a'] = ConvLayer(net['input'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['conv1b'] = ConvLayer(net['conv1a'], num_filters=64, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['pool1'] = MaxPool2DLayer(net['conv1b'], pool_size=(1, 2))  # (time, freq)
        # ----------- 2nd layer group ---------------
        net['conv2a'] = ConvLayer(net['pool1'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['conv2b'] = ConvLayer(net['conv2a'], num_filters=128, filter_size=(3, 3), stride=1, nonlinearity=nonlin)
        net['pool2'] = MaxPool2DLayer(net['conv2b'], pool_size=(2, 2))  # (time, freq)
        # ----------- fully connected layer group ---------------
        net['fc5'] = DenseLayer(net['pool2'], num_units=1024, nonlinearity=nonlin)
        net['fc6'] = DenseLayer(net['fc5'], num_units=1024, nonlinearity=nonlin)
        net['prob'] = DenseLayer(net['fc6'], num_units=41, nonlinearity=lasagne.nonlinearities.softmax)

        return net
Project: kaggle-breast-cancer-prediction    Author: sirCamp    | Project source | File source
def CNN(n_epochs):
    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer),  # Convolutional layer.  Params defined below
            ('pool1', layers.MaxPool2DLayer),  # Like downsampling, for execution speed
            ('conv2', layers.Conv2DLayer),
            ('hidden3', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],

        input_shape=(None, 1, 6, 5),
        conv1_num_filters=8,
        conv1_filter_size=(3, 3),
        conv1_nonlinearity=lasagne.nonlinearities.rectify,

        pool1_pool_size=(2, 2),

        conv2_num_filters=12,
        conv2_filter_size=(1, 1),
        conv2_nonlinearity=lasagne.nonlinearities.rectify,

        hidden3_num_units=1000,
        output_num_units=2,
        output_nonlinearity=lasagne.nonlinearities.softmax,

        update_learning_rate=0.0001,
        update_momentum=0.9,

        max_epochs=n_epochs,
        verbose=0,
    )
    return net1
Project: nn-patterns    Author: pikinder    | Project source | File source
def _set_inverse_parameters(self, patterns=None):
        for l in L.get_all_layers(self.output_layer):
            if type(l) is L.Conv2DLayer:
                W = l.W.get_value()
                if l.flip_filters:
                    W = W[:,:,::-1,::-1]
                W = W.transpose(1,0,2,3)
                self.inverse_map[l].W.set_value(W)
            elif type(l) is L.DenseLayer:
                self.inverse_map[l].W.set_value(l.W.get_value().T)
Project: nn-patterns    Author: pikinder    | Project source | File source
def _set_inverse_parameters(self, patterns=None):
        for l in L.get_all_layers(self.output_layer):
            if type(l) is L.Conv2DLayer:
                W = l.W.get_value()
                if l.flip_filters:
                    W = W[:,:,::-1,::-1]
                W = W.transpose(1,0,2,3)
                self.inverse_map[l].W.set_value(W)
            elif type(l) is L.DenseLayer:
                self.inverse_map[l].W.set_value(l.W.get_value().T)
Project: nn-patterns    Author: pikinder    | Project source | File source
def _get_normalised_relevance_layer(self, layer, feeder):

        def add_epsilon(Zs):
            tmp = (T.cast(Zs >= 0, theano.config.floatX)*2.0 - 1.0)
            return Zs + self.epsilon * tmp

        if isinstance(layer, L.DenseLayer):
            forward_layer = L.DenseLayer(layer.input_layer,
                                         layer.num_units,
                                         W=layer.W,
                                         b=layer.b,
                                         nonlinearity=None)
        elif isinstance(layer, L.Conv2DLayer):
            forward_layer = L.Conv2DLayer(layer.input_layer,
                                          num_filters=layer.num_filters,
                                          W=layer.W,
                                          b=layer.b,
                                          stride=layer.stride,
                                          filter_size=layer.filter_size,
                                          flip_filters=layer.flip_filters,
                                          untie_biases=layer.untie_biases,
                                          pad=layer.pad,
                                          nonlinearity=None)
        else:
            raise NotImplementedError()

        forward_layer = L.ExpressionLayer(forward_layer,
                                          lambda x: 1.0 / add_epsilon(x))
        feeder = L.ElemwiseMergeLayer([forward_layer, feeder],
                                      merge_function=T.mul)

        return feeder
Project: nn-patterns    Author: pikinder    | Project source | File source
def _invert_layer(self, layer, feeder):
        layer_type = type(layer)

        if L.get_output_shape(feeder) != L.get_output_shape(layer):
            feeder = L.ReshapeLayer(feeder, (-1,)+L.get_output_shape(layer)[1:])
        if layer_type is L.InputLayer:
            return self._invert_InputLayer(layer, feeder)
        elif layer_type is L.FlattenLayer:
            return self._invert_FlattenLayer(layer, feeder)
        elif layer_type is L.DenseLayer:
            return self._invert_DenseLayer(layer, feeder)
        elif layer_type is L.Conv2DLayer:
            return self._invert_Conv2DLayer(layer, feeder)
        elif layer_type is L.DropoutLayer:
            return self._invert_DropoutLayer(layer, feeder)
        elif layer_type in [L.MaxPool2DLayer, L.MaxPool1DLayer]:
            return self._invert_MaxPoolingLayer(layer, feeder)
        elif layer_type is L.PadLayer:
            return self._invert_PadLayer(layer, feeder)
        elif layer_type is L.SliceLayer:
            return self._invert_SliceLayer(layer, feeder)
        elif layer_type is L.LocalResponseNormalization2DLayer:
            return self._invert_LocalResponseNormalisation2DLayer(layer, feeder)
        elif layer_type is L.GlobalPoolLayer:
            return self._invert_GlobalPoolLayer(layer, feeder)
        else:
            return self._invert_UnknownLayer(layer, feeder)
Project: nn-patterns    Author: pikinder    | Project source | File source
def _collect_layers(self):
        self.all_layers = L.get_all_layers(self.output_layer)
        ret = [l for l in self.all_layers if
                type(l) in [L.DenseLayer, L.Conv2DLayer]]

        return ret
Project: nn-patterns    Author: pikinder    | Project source | File source
def _get_split(self, layer,
                   deterministic=True, conv_all_patches=True, **kwargs):

        # Get the patches and the outputs without the non-linearities.
        if type(layer) is L.DenseLayer:
            x, y = putils.get_dense_xy(layer, deterministic)
        elif type(layer) is L.Conv2DLayer:
            if conv_all_patches is True:
                x, y = putils.get_conv_xy_all(layer, deterministic)
            else:
                x, y = putils.get_conv_xy(layer, deterministic)
        else:
            raise ValueError("Unknown layer as input")

        # Create an output dictionary
        outputs = dict()

        for name, fun in subtypes:
            outputs[name] = dict()
            mrk_y = 1.0 * T.cast(fun(y), dtype=theano.config.floatX)  # (N,O)
            y_current = y * mrk_y  # y masked by the binary indicator
            cnt_y = T.shape_padaxis(T.sum(mrk_y, axis=0), axis=0)  # (1,O)
            norm = T.maximum(cnt_y, 1.)

            # Count how many datapoints are considered
            outputs[name]['cnt'] = cnt_y

            # The mean of the current batch
            outputs[name]['m_y'] = T.shape_padaxis(y_current.sum(axis=0), axis=0) / norm  # (1,O) mean output for batch
            outputs[name]['m_x'] = T.dot(x.T, mrk_y) / norm  # (D,O) mean input for batch

            # The second moments of the current batch
            outputs[name]['yty'] = T.shape_padaxis(T.sum(y_current ** 2., axis=0), axis=0) / norm  # (1,O)
            outputs[name]['xty'] = T.dot(x.T, y_current) / norm  # D,O

        return dict_to_list(outputs)
Project: nn-patterns    Author: pikinder    | Project source | File source
def get_split(self, layer,
                  deterministic=True, conv_all_patches=True, **kwargs):

        # Get the patches and the outputs without the non-linearities.
        if type(layer) is L.DenseLayer:
            x, y = get_dense_xy(layer, deterministic)
        elif type(layer) is L.Conv2DLayer:
            if conv_all_patches is True:
                x, y = get_conv_xy_all(layer, deterministic)
            else:
                x, y = get_conv_xy(layer, deterministic)
        else:
            raise ValueError("Unknown layer as input")

        # Create an output dictionary
        outputs = dict()

        for name, fun in subtypes:
            outputs[name] = dict()
            mrk_y = 1.0 * T.cast(fun(y), dtype=theano.config.floatX)  # (N,O)
            y_current = y * mrk_y  # y masked by the binary indicator
            cnt_y = T.shape_padaxis(T.sum(mrk_y, axis=0), axis=0)  # (1,O)
            norm = T.maximum(cnt_y, 1.)

            # Count how many datapoints are considered
            outputs[name]['cnt'] = cnt_y

            # The mean of the current batch
            outputs[name]['m_y'] = T.shape_padaxis(y_current.sum(axis=0), axis=0) / norm  # (1,O) mean output for batch
            outputs[name]['m_x'] = T.dot(x.T, mrk_y) / norm  # (D,O) mean input for batch

            # The second moments of the current batch
            outputs[name]['yty'] = T.shape_padaxis(T.sum(y_current ** 2., axis=0), axis=0) / norm  # (1,O)
            outputs[name]['xty'] = T.dot(x.T, y_current) / norm  # D,O

        return dict_to_list(outputs)
Project: began    Author: davidtellez    | Project source | File source
def conv_layer(input, n_filters, stride, name, network_weights, nonlinearity=elu, bn=False):

    layer = Conv2DLayer(input, num_filters=n_filters, filter_size=3, stride=stride, pad='same',
                        nonlinearity=nonlinearity, name=name, W=get_W(network_weights, name), b=get_b(network_weights, name))
    if bn:
        layer = batch_norm(layer)
    return layer
Project: Deopen    Author: kimmo1019    | Project source | File source
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network


#random search to initialize the weights
Project: Deopen    Author: kimmo1019    | Project source | File source
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l+1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    #layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer15, num_units=256)
    network = DenseLayer(layer17, num_units=1, nonlinearity=None)
    return network


#random search to initialize the weights
Project: supic    Author: Hirico    | Project source | File source
def make_layer(self, name, input, units, filter_size=(3,3), stride=(1,1), pad=(1,1), alpha=0.25):
        conv = ConvLayer(input, units, filter_size, stride=stride, pad=pad, nonlinearity=None)
        prelu = lasagne.layers.ParametricRectifierLayer(conv, alpha=lasagne.init.Constant(alpha))
        self.network[name+'x'] = conv
        self.network[name+'>'] = prelu
        return prelu
Project: supic    Author: Hirico    | Project source | File source
def setup_perceptual(self, input):
        """Use lasagne to create a network of convolution layers using pre-trained VGG19 weights.
        """
        offset = np.array([103.939, 116.779, 123.680], dtype=np.float32).reshape((1,3,1,1))
        self.network['percept'] = lasagne.layers.NonlinearityLayer(input, lambda x: ((x+0.5)*255.0) - offset)

        self.network['mse'] = self.network['percept']
        self.network['conv1_1'] = ConvLayer(self.network['percept'], 64, 3, pad=1)
        self.network['conv1_2'] = ConvLayer(self.network['conv1_1'], 64, 3, pad=1)
        self.network['pool1']   = PoolLayer(self.network['conv1_2'], 2, mode='max')
        self.network['conv2_1'] = ConvLayer(self.network['pool1'],   128, 3, pad=1)
        self.network['conv2_2'] = ConvLayer(self.network['conv2_1'], 128, 3, pad=1)
        self.network['pool2']   = PoolLayer(self.network['conv2_2'], 2, mode='max')
        self.network['conv3_1'] = ConvLayer(self.network['pool2'],   256, 3, pad=1)
        self.network['conv3_2'] = ConvLayer(self.network['conv3_1'], 256, 3, pad=1)
        self.network['conv3_3'] = ConvLayer(self.network['conv3_2'], 256, 3, pad=1)
        self.network['conv3_4'] = ConvLayer(self.network['conv3_3'], 256, 3, pad=1)
        self.network['pool3']   = PoolLayer(self.network['conv3_4'], 2, mode='max')
        self.network['conv4_1'] = ConvLayer(self.network['pool3'],   512, 3, pad=1)
        self.network['conv4_2'] = ConvLayer(self.network['conv4_1'], 512, 3, pad=1)
        self.network['conv4_3'] = ConvLayer(self.network['conv4_2'], 512, 3, pad=1)
        self.network['conv4_4'] = ConvLayer(self.network['conv4_3'], 512, 3, pad=1)
        self.network['pool4']   = PoolLayer(self.network['conv4_4'], 2, mode='max')
        self.network['conv5_1'] = ConvLayer(self.network['pool4'],   512, 3, pad=1)
        self.network['conv5_2'] = ConvLayer(self.network['conv5_1'], 512, 3, pad=1)
        self.network['conv5_3'] = ConvLayer(self.network['conv5_2'], 512, 3, pad=1)
        self.network['conv5_4'] = ConvLayer(self.network['conv5_3'], 512, 3, pad=1)
Project: lasagne_CNN_framework    Author: woshialex    | Project source | File source
def bn_conv(input_layer, **kwargs):
    l = Conv2DLayer(input_layer, **kwargs)
    l = batch_norm(l, epsilon=0.001)
    return l
Project: python-machine-learning    Author: sho-87    | Project source | File source
def build_cnn(k_height=1, k_width=25, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 4, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8,
                          filter_size = (k_height, k_width),
                          stride = 1, pad = 'same',
                          W = lasagne.init.Normal(std = 0.02),
                          nonlinearity = lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = (2,2), stride = (2,2))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
            l_drop1,
            num_units=50,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
            l_drop2,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
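
For reference, here is a minimal sketch of such a helper, written to match the behaviour the comment describes (optional shuffling; any final batch smaller than `batchsize` is dropped), in the style of the iterate_minibatches helper from the Lasagne examples:

import numpy as np

def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    # Yield (inputs, targets) mini-batches of exactly `batchsize` rows.
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]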
Project: python-machine-learning    Author: sho-87    | Project source | File source
def build_cnn(k_height=1, k_width=25, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 30, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8,
                          filter_size = (k_height, k_width),
                          stride = 1, pad = 'same',
                          W = lasagne.init.Normal(std = 0.02),
                          nonlinearity = lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = (3,3), stride = (3,3))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
            l_drop1,
            num_units=50,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
            l_drop2,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

Project: python-machine-learning    Author: sho-87    | Project source | File source
def build_cnn(k_height=1, k_width=25, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 5, NUM_ELECTRODES, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8,
                          filter_size = (k_height, k_width),
                          stride = 1, pad = 'same',
                          W = lasagne.init.Normal(std = 0.02),
                          nonlinearity = lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = (2,2), stride = (2,2))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
            l_drop1,
            num_units=50,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
            l_drop2,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

Project: python-machine-learning    Author: sho-87    | Project source | File source
def build_cnn(k_height=3, k_width=3, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 5, 30, 30), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8,
                          filter_size = (k_height, k_width),
                          stride = 1, pad = 'same',
                          W = lasagne.init.Normal(std = 0.02),
                          nonlinearity = lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = (2,2), stride = (2,2))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
            l_drop1,
            num_units=50,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
            l_drop2,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

Project: python-machine-learning    Author: sho-87    | Project source | File source
def build_cnn(input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 64, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8, filter_size = (3,3),
                        stride = 1, pad = 'same', W = lasagne.init.Normal(std = 0.02),
                        nonlinearity = lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = 2, stride = 2)

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
            l_drop1,
            num_units=50,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
            l_drop2,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

Project: python-machine-learning    Author: sho-87    | Project source | File source
def build_cnn(k_height, k_width, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 64, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8,
                          filter_size = (k_height, k_width),
                          stride = 1, pad = 'same',
                          W = lasagne.init.Normal(std = 0.02),
                          nonlinearity = lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = 2, stride = 2)

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
            l_drop1,
            num_units=50,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
            l_drop2,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

Project: python-machine-learning    Author: sho-87    | Project source | File source
def build_cnn(input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 64, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8, filter_size = 3,
                        stride = 1, pad = 'same', W = lasagne.init.Normal(std = 0.02),
                        nonlinearity = lasagne.nonlinearities.very_leaky_rectify) 

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = 2, stride = 2)

    # A fully-connected layer
    l_fc = lasagne.layers.DenseLayer(
            l_pool1,
            num_units=512,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_out = lasagne.layers.DenseLayer(
            l_fc,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

Project: python-machine-learning    Author: sho-87    | Project source | File source
def build_cnn(input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 64, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8, filter_size = (3,3),
                        stride = 1, pad = 'same', W = lasagne.init.Normal(std = 0.02),
                        nonlinearity = lasagne.nonlinearities.very_leaky_rectify) 

    l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = 2, stride = 2)

    # A fully-connected layer
    l_fc = lasagne.layers.DenseLayer(
            l_pool1,
            num_units=512,
            nonlinearity=lasagne.nonlinearities.rectify)

    l_out = lasagne.layers.DenseLayer(
            l_fc,
            num_units=2,
            nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

Project: experiments    Author: tencia    | Project source | File source
def build_nets(input_var, channels=1, do_batchnorm=True, z_dim=100):

    def ns(shape):
        # Copy the shape, replacing the batch axis with [0]; in lasagne's
        # ReshapeLayer a one-element list [i] means "reuse input dimension i".
        ret = list(shape)
        ret[0] = [0]
        return tuple(ret)

    ret = {}
    bn = batch_norm if do_batchnorm else lambda x:x
    ret['ae_in'] = layer = InputLayer(shape=(None,channels,28,28), input_var=input_var)
    ret['ae_conv1'] = layer = bn(Conv2DLayer(layer, num_filters=64, filter_size=5))
    ret['ae_pool1'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['ae_conv2'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=3))
    ret['ae_pool2'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['ae_enc'] = layer = DenseLayer(layer, num_units=z_dim,
            nonlinearity=nn.nonlinearities.tanh)
    ret['ae_unenc'] = layer = bn(nn.layers.DenseLayer(layer,
        num_units = np.product(nn.layers.get_output_shape(ret['ae_pool2'])[1:])))
    ret['ae_resh'] = layer = ReshapeLayer(layer,
            shape=ns(nn.layers.get_output_shape(ret['ae_pool2'])))
    ret['ae_depool2'] = layer = Upscale2DLayer(layer, scale_factor=2)
    ret['ae_deconv2'] = layer = bn(Conv2DLayer(layer, num_filters=64, filter_size=3,
        pad='full'))
    ret['ae_depool1'] = layer = Upscale2DLayer(layer, scale_factor=2)
    ret['ae_out'] = Conv2DLayer(layer, num_filters=1, filter_size=5, pad='full',
            nonlinearity=nn.nonlinearities.sigmoid)

    ret['disc_in'] = layer = InputLayer(shape=(None,channels,28,28), input_var=input_var)
    ret['disc_conv1'] = layer = bn(Conv2DLayer(layer, num_filters=64, filter_size=5))
    ret['disc_pool1'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['disc_conv2'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=3))
    ret['disc_pool2'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['disc_hid'] = layer = bn(DenseLayer(layer, num_units=100))
    ret['disc_out'] = DenseLayer(layer, num_units=1, nonlinearity=nn.nonlinearities.sigmoid)

    return ret
Project: DeepRes    Author: Aneeshers    | Project source | File source
def make_layer(self, name, input, units, filter_size=(3,3), stride=(1,1), pad=(1,1), alpha=0.25):
        conv = ConvLayer(input, units, filter_size, stride=stride, pad=pad, nonlinearity=None)
        prelu = lasagne.layers.ParametricRectifierLayer(conv, alpha=lasagne.init.Constant(alpha))
        self.network[name+'x'] = conv
        self.network[name+'>'] = prelu
        return prelu
Project: DeepRes    Author: Aneeshers    | Project source | File source
def setup_perceptual(self, input):

        offset = np.array([103.939, 116.779, 123.680], dtype=np.float32).reshape((1,3,1,1))
        self.network['percept'] = lasagne.layers.NonlinearityLayer(input, lambda x: ((x+0.5)*255.0) - offset)

        self.network['mse'] = self.network['percept']
        self.network['conv1_1'] = ConvLayer(self.network['percept'], 64, 3, pad=1)
        self.network['conv1_2'] = ConvLayer(self.network['conv1_1'], 64, 3, pad=1)
        self.network['pool1']   = PoolLayer(self.network['conv1_2'], 2, mode='max')
        self.network['conv2_1'] = ConvLayer(self.network['pool1'],   128, 3, pad=1)
        self.network['conv2_2'] = ConvLayer(self.network['conv2_1'], 128, 3, pad=1)
        self.network['pool2']   = PoolLayer(self.network['conv2_2'], 2, mode='max')
        self.network['conv3_1'] = ConvLayer(self.network['pool2'],   256, 3, pad=1)
        self.network['conv3_2'] = ConvLayer(self.network['conv3_1'], 256, 3, pad=1)
        self.network['conv3_3'] = ConvLayer(self.network['conv3_2'], 256, 3, pad=1)
        self.network['conv3_4'] = ConvLayer(self.network['conv3_3'], 256, 3, pad=1)
        self.network['pool3']   = PoolLayer(self.network['conv3_4'], 2, mode='max')
        self.network['conv4_1'] = ConvLayer(self.network['pool3'],   512, 3, pad=1)
        self.network['conv4_2'] = ConvLayer(self.network['conv4_1'], 512, 3, pad=1)
        self.network['conv4_3'] = ConvLayer(self.network['conv4_2'], 512, 3, pad=1)
        self.network['conv4_4'] = ConvLayer(self.network['conv4_3'], 512, 3, pad=1)
        self.network['pool4']   = PoolLayer(self.network['conv4_4'], 2, mode='max')
        self.network['conv5_1'] = ConvLayer(self.network['pool4'],   512, 3, pad=1)
        self.network['conv5_2'] = ConvLayer(self.network['conv5_1'], 512, 3, pad=1)
        self.network['conv5_3'] = ConvLayer(self.network['conv5_2'], 512, 3, pad=1)
        self.network['conv5_4'] = ConvLayer(self.network['conv5_3'], 512, 3, pad=1)
Project: photo-auto-balance    Author: starcolon    | Project source | File source
def new(self, image_dim, final_vec_dim):

    input_dim  = (None,) + image_dim

    # Create initial nets, one per final vector element
    self.nets = []
    self.input_layers = []
    for i in range(final_vec_dim):
      l_input = layers.InputLayer(shape=input_dim)
      l_conv0 = layers.Conv2DLayer(l_input, 64, (5,5))
      l_max0  = layers.MaxPool2DLayer(l_conv0, (5,5), stride=3)
      l_conv1 = layers.Conv2DLayer(l_max0, 32, (5,5))
      l_max1  = layers.MaxPool2DLayer(l_conv1, (5,5), stride=2)
      l_conv2 = layers.Conv2DLayer(l_max1, 32, (3,3))
      l_pool  = layers.MaxPool2DLayer(l_conv2, (3,3), stride=1)
      l_1d1   = layers.DenseLayer(l_pool, 24)
      l_1d2   = layers.DenseLayer(l_1d1, 8)
      l_1d3   = layers.DenseLayer(l_1d2, 1)

      self.nets.append(l_1d3)
      self.input_layers.append(l_input)

  # Train the neural net
  # @param {Matrix} trainset X
  # @param {Vector} trainset y
  # @param {Matrix} validation set X
  # @param {Vector} validation set y
  # @param {int} batch size
  # @param {int} number of epochs to run
  # @param {list[double]} learning rates (non-negative, non-zero)
  # @param {str} path to save model
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | Project source | File source
def __build_12_net__(self):

        network = layers.InputLayer((None, 3, 12, 12), input_var=self.__input_var__)
        network = layers.dropout(network, p=0.1)
        network = layers.Conv2DLayer(network, num_filters=16, filter_size=(3,3), stride=1, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3,3), stride=2)
        network = layers.DropoutLayer(network, p=0.3)
        network = layers.DenseLayer(network, num_units=16, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.DropoutLayer(network, p=0.3)
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | Project source | File source
def __build_24_net__(self):

        network = layers.InputLayer((None, 3, 24, 24), input_var=self.__input_var__)
        network = layers.dropout(network, p=0.1)
        network = layers.Conv2DLayer(network, num_filters=64, filter_size=(5,5), stride=1, nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size=(3,3), stride=2)
        network = layers.DropoutLayer(network, p=0.5)
        network = layers.batch_norm(network)
        network = layers.DenseLayer(network, num_units=64, nonlinearity=relu)
        network = layers.DropoutLayer(network, p=0.5)
        network = layers.DenseLayer(network, num_units=2, nonlinearity=softmax)
        return network
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | Project source | File source
def __build_12_calib_net__(self):
        network = layers.InputLayer((None, 3, 12, 12), input_var=self.__input_var__)
        network = layers.Conv2DLayer(network, num_filters=16, filter_size=(3,3), stride=1, nonlinearity=relu)
        network = layers.MaxPool2DLayer(network, pool_size=(3,3), stride=2)
        network = layers.DenseLayer(network, num_units=128, nonlinearity=relu)
        network = layers.DenseLayer(network, num_units=45, nonlinearity=softmax)
        return network
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | Project source | File source
def __build_24_calib_net__(self):
        network = layers.InputLayer((None, 3, 24, 24), input_var=self.__input_var__)
        network = layers.Conv2DLayer(network, num_filters=32, filter_size=(5,5), stride=1, nonlinearity=relu)
        network = layers.MaxPool2DLayer(network, pool_size=(3,3), stride=2)
        network = layers.DenseLayer(network, num_units=64, nonlinearity=relu)
        network = layers.DenseLayer(network, num_units=45, nonlinearity=softmax)
        return network
Project: diagnose-heart    Author: woshialex    | Project source | File source
def build_fcn_segmenter(input_var, shape, version=2):
    ret = {}

    if version == 2:
        ret['input'] = la = InputLayer(shape, input_var)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=16, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=32, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['pool%d'%len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=64, filter_size=3,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=32, filter_size=7,
            pad='full'))
        ret['ups%d'%len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=16, filter_size=3,
            pad='full'))
        ret['conv%d'%len(ret)] = la = bn(Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['output'] = la = Conv2DLayer(la, num_filters=1, filter_size=7,
                pad='full', nonlinearity=nn.nonlinearities.sigmoid)

    return ret, nn.layers.get_output(ret['output']), \
            nn.layers.get_output(ret['output'], deterministic=True)
Project: neural-enhance    Author: alexjc    | Project source | File source
def make_layer(self, name, input, units, filter_size=(3,3), stride=(1,1), pad=(1,1), alpha=0.25):
        conv = ConvLayer(input, units, filter_size, stride=stride, pad=pad, nonlinearity=None)
        prelu = lasagne.layers.ParametricRectifierLayer(conv, alpha=lasagne.init.Constant(alpha))
        self.network[name+'x'] = conv
        self.network[name+'>'] = prelu
        return prelu
Project: neural-enhance    Author: alexjc    | Project source | File source
def setup_perceptual(self, input):
        """Use lasagne to create a network of convolution layers using pre-trained VGG19 weights.
        """
        offset = np.array([103.939, 116.779, 123.680], dtype=np.float32).reshape((1,3,1,1))
        self.network['percept'] = lasagne.layers.NonlinearityLayer(input, lambda x: ((x+0.5)*255.0) - offset)

        self.network['mse'] = self.network['percept']
        self.network['conv1_1'] = ConvLayer(self.network['percept'], 64, 3, pad=1)
        self.network['conv1_2'] = ConvLayer(self.network['conv1_1'], 64, 3, pad=1)
        self.network['pool1']   = PoolLayer(self.network['conv1_2'], 2, mode='max')
        self.network['conv2_1'] = ConvLayer(self.network['pool1'],   128, 3, pad=1)
        self.network['conv2_2'] = ConvLayer(self.network['conv2_1'], 128, 3, pad=1)
        self.network['pool2']   = PoolLayer(self.network['conv2_2'], 2, mode='max')
        self.network['conv3_1'] = ConvLayer(self.network['pool2'],   256, 3, pad=1)
        self.network['conv3_2'] = ConvLayer(self.network['conv3_1'], 256, 3, pad=1)
        self.network['conv3_3'] = ConvLayer(self.network['conv3_2'], 256, 3, pad=1)
        self.network['conv3_4'] = ConvLayer(self.network['conv3_3'], 256, 3, pad=1)
        self.network['pool3']   = PoolLayer(self.network['conv3_4'], 2, mode='max')
        self.network['conv4_1'] = ConvLayer(self.network['pool3'],   512, 3, pad=1)
        self.network['conv4_2'] = ConvLayer(self.network['conv4_1'], 512, 3, pad=1)
        self.network['conv4_3'] = ConvLayer(self.network['conv4_2'], 512, 3, pad=1)
        self.network['conv4_4'] = ConvLayer(self.network['conv4_3'], 512, 3, pad=1)
        self.network['pool4']   = PoolLayer(self.network['conv4_4'], 2, mode='max')
        self.network['conv5_1'] = ConvLayer(self.network['pool4'],   512, 3, pad=1)
        self.network['conv5_2'] = ConvLayer(self.network['conv5_1'], 512, 3, pad=1)
        self.network['conv5_3'] = ConvLayer(self.network['conv5_2'], 512, 3, pad=1)
        self.network['conv5_4'] = ConvLayer(self.network['conv5_3'], 512, 3, pad=1)
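The snippet only defines the layer graph; the pre-trained VGG19 parameters still have to be loaded separately. A hedged sketch of how that might look (the file name and pickle layout are assumptions modeled on the Lasagne Recipes format):

import pickle
import lasagne

params = lasagne.layers.get_all_params(self.network['conv5_4'])
with open('vgg19.pkl', 'rb') as f:               # hypothetical weights file
    data = pickle.load(f, encoding='latin-1')    # assumed Recipes-style pickle
# keep only as many value arrays as the conv stack has parameters
lasagne.layers.set_all_param_values(self.network['conv5_4'],
                                    data['param values'][:len(params)])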
Project: gogh-figure    Author: joelmoniz    | project source | file source
def style_conv_block(conv_in, num_styles, num_filters, filter_size, stride, nonlinearity=rectify, normalization=instance_norm):
    sc_network = ReflectLayer(conv_in, filter_size//2)
    sc_network = normalization(ConvLayer(sc_network, num_filters, filter_size, stride, nonlinearity=nonlinearity, W=Normal()), num_styles=num_styles)
    return sc_network
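A hypothetical way to chain these blocks into the downsampling front of a Johnson-style transformation network (`ReflectLayer` and `instance_norm` are custom utilities defined elsewhere in the gogh-figure repo; the filter counts here are illustrative):

net = style_conv_block(input_layer, num_styles, 32, 9, 1)   # large receptive field
net = style_conv_block(net, num_styles, 64, 3, 2)           # downsample 2x
net = style_conv_block(net, num_styles, 128, 3, 2)          # downsample 2x again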
Project: gogh-figure    Author: joelmoniz    | project source | file source
def setup_loss_net(self):
        """
        Create a network of convolution layers based on the VGG16 architecture from the paper:
        "Very Deep Convolutional Networks for Large-Scale Image Recognition"

        Original source: https://gist.github.com/ksimonyan/211839e770f7b538e2d8
        License: see http://www.robots.ox.ac.uk/~vgg/research/very_deep/

        Based on code in the Lasagne Recipes repository: https://github.com/Lasagne/Recipes
        """
        loss_net = self.network['loss_net']
        loss_net['input'] = InputLayer(shape=self.shape)
        loss_net['conv1_1'] = ConvLayer(loss_net['input'], 64, 3, pad=1, flip_filters=False)
        loss_net['conv1_2'] = ConvLayer(loss_net['conv1_1'], 64, 3, pad=1, flip_filters=False)
        loss_net['pool1'] = PoolLayer(loss_net['conv1_2'], 2)
        loss_net['conv2_1'] = ConvLayer(loss_net['pool1'], 128, 3, pad=1, flip_filters=False)
        loss_net['conv2_2'] = ConvLayer(loss_net['conv2_1'], 128, 3, pad=1, flip_filters=False)
        loss_net['pool2'] = PoolLayer(loss_net['conv2_2'], 2)
        loss_net['conv3_1'] = ConvLayer(loss_net['pool2'], 256, 3, pad=1, flip_filters=False)
        loss_net['conv3_2'] = ConvLayer(loss_net['conv3_1'], 256, 3, pad=1, flip_filters=False)
        loss_net['conv3_3'] = ConvLayer(loss_net['conv3_2'], 256, 3, pad=1, flip_filters=False)
        loss_net['pool3'] = PoolLayer(loss_net['conv3_3'], 2)
        loss_net['conv4_1'] = ConvLayer(loss_net['pool3'], 512, 3, pad=1, flip_filters=False)
        loss_net['conv4_2'] = ConvLayer(loss_net['conv4_1'], 512, 3, pad=1, flip_filters=False)
        loss_net['conv4_3'] = ConvLayer(loss_net['conv4_2'], 512, 3, pad=1, flip_filters=False)
        loss_net['pool4'] = PoolLayer(loss_net['conv4_3'], 2)
        loss_net['conv5_1'] = ConvLayer(loss_net['pool4'], 512, 3, pad=1, flip_filters=False)
        loss_net['conv5_2'] = ConvLayer(loss_net['conv5_1'], 512, 3, pad=1, flip_filters=False)
        loss_net['conv5_3'] = ConvLayer(loss_net['conv5_2'], 512, 3, pad=1, flip_filters=False)
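A hedged sketch of how the loss network's activations might then enter a perceptual (content) loss; the layer choice 'conv3_3' and the variable names are assumptions:

import theano.tensor as T
import lasagne

target_var = T.tensor4('targets')
gen_feat = lasagne.layers.get_output(loss_net['conv3_3'], deterministic=True)
ref_feat = lasagne.layers.get_output(loss_net['conv3_3'],
                                     inputs=target_var, deterministic=True)
content_loss = lasagne.objectives.squared_error(gen_feat, ref_feat).mean()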
Project: adda_mnist64    Author: davidtellez    | project source | file source
def network_discriminator(self, features):

        network = {}
        network['discriminator/conv2'] = Conv2DLayer(features, num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv2')
        network['discriminator/pool2'] = MaxPool2DLayer(network['discriminator/conv2'], pool_size=2, stride=2, pad=0, name='discriminator/pool2')
        network['discriminator/conv3'] = Conv2DLayer(network['discriminator/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv3')
        network['discriminator/pool3'] = MaxPool2DLayer(network['discriminator/conv3'], pool_size=2, stride=2, pad=0, name='discriminator/pool3')
        network['discriminator/conv4'] = Conv2DLayer(network['discriminator/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv4')
        network['discriminator/pool4'] = MaxPool2DLayer(network['discriminator/conv4'], pool_size=2, stride=2, pad=0, name='discriminator/pool4')
        network['discriminator/dense1'] = DenseLayer(network['discriminator/pool4'], num_units=64, nonlinearity=rectify, name='discriminator/dense1')
        network['discriminator/output'] = DenseLayer(network['discriminator/dense1'], num_units=2, nonlinearity=softmax, name='discriminator/output')

        return network
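Hypothetical usage in ADDA-style adversarial adaptation (the encoder layer and label variable are assumptions): the 2-way softmax output classifies whether features come from the source or the target domain.

import theano.tensor as T
import lasagne

domain_labels = T.ivector('domain')    # 0 = source domain, 1 = target domain
disc = self.network_discriminator(encoder_output_layer)
probs = lasagne.layers.get_output(disc['discriminator/output'])
disc_loss = lasagne.objectives.categorical_crossentropy(probs, domain_labels).mean()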
Project: dqn_vizdoom_theano    Author: mihahauke    | project source | file source
def _initialize_network(self, img_input_shape, misc_len, output_size, img_input, misc_input=None, **kwargs):

        input_layers = []
        inputs = [img_input]
        # weights_init = lasagne.init.GlorotUniform("relu")
        weights_init = lasagne.init.HeNormal("relu")

        network = ls.InputLayer(shape=img_input_shape, input_var=img_input)
        input_layers.append(network)
        network = ls.Conv2DLayer(network, num_filters=32, filter_size=8, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=4)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=4, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=2)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=3, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=1)

        if self.misc_state_included:
            inputs.append(misc_input)
            network = ls.FlattenLayer(network)
            misc_input_layer = ls.InputLayer(shape=(None, misc_len), input_var=misc_input)
            input_layers.append(misc_input_layer)
            if "additional_misc_layer" in kwargs:
                misc_input_layer = ls.DenseLayer(misc_input_layer, int(kwargs["additional_misc_layer"]),
                                                 nonlinearity=rectify,
                                                 W=weights_init, b=lasagne.init.Constant(0.1))

            network = ls.ConcatLayer([network, misc_input_layer])

        network = ls.DenseLayer(network, 512, nonlinearity=rectify,
                                W=weights_init, b=lasagne.init.Constant(0.1))

        network = ls.DenseLayer(network, output_size, nonlinearity=None, b=lasagne.init.Constant(.1))
        return network, input_layers, inputs
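A minimal sketch (shapes and variable names assumed) of compiling the returned network into a Q-value/greedy-action function:

import theano
import theano.tensor as T
import lasagne.layers as ls

network, input_layers, inputs = self._initialize_network(img_shape, misc_len,
                                                         n_actions, img_var)
q_values = ls.get_output(network)
best_actions = T.argmax(q_values, axis=1)   # greedy policy
q_fn = theano.function(inputs, [q_values, best_actions])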
Project: dqn_vizdoom_theano    Author: mihahauke    | project source | file source
def _initialize_network(self, img_input_shape, misc_len, output_size, img_input, misc_input=None, **kwargs):
        input_layers = []
        inputs = [img_input]
        # weights_init = lasagne.init.GlorotUniform("relu")
        weights_init = lasagne.init.HeNormal("relu")

        network = ls.InputLayer(shape=img_input_shape, input_var=img_input)
        input_layers.append(network)
        network = ls.Conv2DLayer(network, num_filters=32, filter_size=8, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(.1), stride=4)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=4, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(.1), stride=2)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=3, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(.1), stride=1)

        if self.misc_state_included:
            inputs.append(misc_input)
            network = ls.FlattenLayer(network)
            misc_input_layer = ls.InputLayer(shape=(None, misc_len), input_var=misc_input)
            input_layers.append(misc_input_layer)
            if "additional_misc_layer" in kwargs:
                misc_input_layer = ls.DenseLayer(misc_input_layer, int(kwargs["additional_misc_layer"]),
                                                 nonlinearity=rectify,
                                                 W=weights_init, b=lasagne.init.Constant(0.1))
            network = ls.ConcatLayer([network, misc_input_layer])

        # Duelling architecture (Wang et al., 2016): separate advantage and
        # state-value streams, merged into Q-values below.

        advantages_branch = ls.DenseLayer(network, 256, nonlinearity=rectify,
                                          W=weights_init, b=lasagne.init.Constant(.1))
        advantages_branch = ls.DenseLayer(advantages_branch, output_size, nonlinearity=None,
                                          b=lasagne.init.Constant(.1))

        state_value_branch = ls.DenseLayer(network, 256, nonlinearity=rectify,
                                           W=weights_init, b=lasagne.init.Constant(.1))
        state_value_branch = ls.DenseLayer(state_value_branch, 1, nonlinearity=None,
                                           b=lasagne.init.Constant(.1))

        network = DuellingMergeLayer([advantages_branch, state_value_branch])
        return network, input_layers, inputs
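`DuellingMergeLayer` is defined elsewhere in the repo; below is a minimal sketch of the standard duelling combination it presumably implements (Wang et al., 2016): Q(s,a) = V(s) + A(s,a) - mean_a A(s,a).

import lasagne.layers as ls

class DuellingMergeSketch(ls.MergeLayer):
    """Merges [advantages, state_value] into Q-values (illustrative only)."""
    def get_output_shape_for(self, input_shapes):
        return input_shapes[0]            # one Q-value per action
    def get_output_for(self, inputs, **kwargs):
        advantages, state_value = inputs  # shapes: (batch, actions), (batch, 1)
        return state_value + advantages - advantages.mean(axis=1, keepdims=True)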