Python lasagne.nonlinearities module: rectify() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use lasagne.nonlinearities.rectify().
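Before the per-project examples, a minimal usage sketch may help: rectify implements the ReLU activation, rectify(x) = max(0, x), and is simply passed to a layer through its nonlinearity argument. The layer sizes and function name below are illustrative assumptions, not taken from any of the projects listed.

import lasagne
from lasagne.layers import InputLayer, DenseLayer
from lasagne.nonlinearities import rectify, softmax

def build_simple_mlp(input_var=None):
    # Hypothetical 784-to-10 MLP; rectify (ReLU) is the hidden-layer nonlinearity.
    l_in = InputLayer(shape=(None, 784), input_var=input_var)
    l_hid = DenseLayer(l_in, num_units=256, nonlinearity=rectify,
                       W=lasagne.init.GlorotUniform())
    l_out = DenseLayer(l_hid, num_units=10, nonlinearity=softmax)
    return l_out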

Project: dsb3    Author: EliasVansteenkiste    | Project source | File source
def highway_conv3(incoming, nonlinearity=nn.nonlinearities.rectify, **kwargs):
    wh = nn.init.Orthogonal('relu')
    bh = nn.init.Constant(0.0)
    wt = nn.init.Orthogonal('relu')
    bt = nn.init.Constant(-2.)
    num_filters = incoming.output_shape[1]

    # H
    l_h = Conv2DDNNLayer(incoming, num_filters=num_filters,
                         filter_size=(3, 3), stride=(1, 1),
                         pad='same', W=wh, b=bh,
                         nonlinearity=nonlinearity)
    # T
    l_t = Conv2DDNNLayer(incoming, num_filters=num_filters,
                         filter_size=(3, 3), stride=(1, 1),
                         pad='same', W=wt, b=bt,
                         nonlinearity=T.nnet.sigmoid)

    return HighwayLayer(gate=l_t, input1=l_h, input2=incoming)
Project: NeuroNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming, num_filters, filter_size, stride=1,
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=True,
                 convolution=conv.conv1d_mc0, **kwargs):
        if isinstance(incoming, tuple):
            input_shape = incoming
        else:
            input_shape = incoming.output_shape

        # Retrieve the supplied name, if it exists; otherwise use ''
        if 'name' in kwargs:
            basename = kwargs['name'] + '.'
            # Create a separate version of kwargs for the contained layers
            # which does not include 'name'
            layer_kwargs = dict((key, arg) for key, arg in kwargs.items() if key != 'name')
        else:
            basename = ''
            layer_kwargs = kwargs
        self.conv1d = Conv1DLayer(InputLayer((None,) + input_shape[2:]), num_filters, filter_size, stride, pad,
                                  untie_biases, W, b, nonlinearity, flip_filters, convolution, name=basename + "conv1d",
                                  **layer_kwargs)
        self.W = self.conv1d.W
        self.b = self.conv1d.b
        super(ConvTimeStep1DLayer, self).__init__(incoming, **kwargs)
Project: NeuroNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        self.vertex_shape = incoming_vertex.output_shape
        self.edge_shape = incoming_edge.output_shape

        self.input_shape = incoming_vertex.output_shape
        incomings = [incoming_vertex, incoming_edge]
        self.vertex_incoming_index = 0
        self.edge_incoming_index = 1
        super(GraphConvLayer, self).__init__(incomings, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)
Project: NeuroNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
                 b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
        if b_h is None:
            self.b_h = None
        else:
            self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)

        self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
        if b_t is None:
            self.b_t = None
        else:
            self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
Project: MachineComprehension    Author: sa-j    | Project source | File source
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(CustomDense, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        num_inputs = self.input_shape[-1]

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name="b",
                                    regularizable=False)
Project: LasagneNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        self.vertex_shape = incoming_vertex.output_shape
        self.edge_shape = incoming_edge.output_shape

        self.input_shape = incoming_vertex.output_shape
        incomings = [incoming_vertex, incoming_edge]
        self.vertex_incoming_index = 0
        self.edge_incoming_index = 1
        super(GraphConvLayer, self).__init__(incomings, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)
Project: LasagneNLP    Author: XuezheMax    | Project source | File source
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
                 b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
        super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
        if b_h is None:
            self.b_h = None
        else:
            self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)

        self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
        if b_t is None:
            self.b_t = None
        else:
            self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
Project: deep_learning    Author: Vict0rSch    | Project source | File source
def build_mlp(input_var=None):
    l_in = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)

    l_hid1 = DenseLayer(
            l_in, num_units=500,
            nonlinearity=rectify,
            W=lasagne.init.GlorotUniform())
    l_hid1_drop = DropoutLayer(l_hid1, p=0.4)

    l_hid2 = DenseLayer(
            l_hid1_drop, num_units=300,
            nonlinearity=rectify)
    l_hid2_drop = DropoutLayer(l_hid2, p=0.4)

    l_out = DenseLayer(
            l_hid2_drop, num_units=10,
            nonlinearity=softmax)

    return l_out


# generator giving the batches
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | Project source | File source
def __build_48_net__(self):
        network = layers.InputLayer((None, 3, 48, 48), input_var=self.__input_var__)

        network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
        network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)        
        network = layers.batch_norm(network)

        network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)

        network = layers.Conv2DLayer(network,num_filters=64,filter_size=(3,3),stride=1,nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)

        network = layers.DenseLayer(network,num_units = 256,nonlinearity = relu)
        network = layers.DenseLayer(network,num_units = 2, nonlinearity = softmax)
        return network
Project: adda_mnist64    Author: davidtellez    | Project source | File source
def network_classifier(self, input_var):

        network = {}
        network['classifier/input'] = InputLayer(shape=(None, 3, 64, 64), input_var=input_var, name='classifier/input')
        network['classifier/conv1'] = Conv2DLayer(network['classifier/input'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv1')
        network['classifier/pool1'] = MaxPool2DLayer(network['classifier/conv1'], pool_size=2, stride=2, pad=0, name='classifier/pool1')
        network['classifier/conv2'] = Conv2DLayer(network['classifier/pool1'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv2')
        network['classifier/pool2'] = MaxPool2DLayer(network['classifier/conv2'], pool_size=2, stride=2, pad=0, name='classifier/pool2')
        network['classifier/conv3'] = Conv2DLayer(network['classifier/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv3')
        network['classifier/pool3'] = MaxPool2DLayer(network['classifier/conv3'], pool_size=2, stride=2, pad=0, name='classifier/pool3')
        network['classifier/conv4'] = Conv2DLayer(network['classifier/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv4')
        network['classifier/pool4'] = MaxPool2DLayer(network['classifier/conv4'], pool_size=2, stride=2, pad=0, name='classifier/pool4')
        network['classifier/dense1'] = DenseLayer(network['classifier/pool4'], num_units=64, nonlinearity=rectify, name='classifier/dense1')
        network['classifier/output'] = DenseLayer(network['classifier/dense1'], num_units=10, nonlinearity=softmax, name='classifier/output')

        return network
Project: ConvolutionalAutoEncoder    Author: ToniCreswell    | Project source | File source
def build_net(nz=10):
    # nz = size of latent code
    #N.B. using batch_norm applies bn before non-linearity!
    F=32
    enc = InputLayer(shape=(None,1,28,28))
    enc = Conv2DLayer(incoming=enc, num_filters=F*2, filter_size=5,stride=2, nonlinearity=lrelu(0.2),pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F*4, filter_size=5,stride=2, nonlinearity=lrelu(0.2),pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F*4, filter_size=5,stride=1, nonlinearity=lrelu(0.2),pad=2)
    enc = reshape(incoming=enc, shape=(-1,F*4*7*7))
    enc = DenseLayer(incoming=enc, num_units=nz, nonlinearity=sigmoid)
    #Generator networks
    dec = InputLayer(shape=(None,nz))
    dec = DenseLayer(incoming=dec, num_units=F*4*7*7)
    dec = reshape(incoming=dec, shape=(-1,F*4,7,7))
    dec = Deconv2DLayer(incoming=dec, num_filters=F*4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=F*4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=1, filter_size=3, stride=1, nonlinearity=sigmoid, crop=1)

    return enc, dec
Project: ip-avsr    Author: lzuwei    | Project source | File source
def load_dbn(path='models/oulu_ae.mat'):
    """
    load a pretrained dbn from path
    :param path: path to the .mat dbn
    :return: pretrained deep belief network
    """
    # create the network using weights from pretrain_nn.mat
    nn = sio.loadmat(path)
    w1 = nn['w1']
    w2 = nn['w2']
    w3 = nn['w3']
    w4 = nn['w4']
    b1 = nn['b1'][0]
    b2 = nn['b2'][0]
    b3 = nn['b3'][0]
    b4 = nn['b4'][0]
    weights = [w1, w2, w3, w4]
    biases = [b1, b2, b3, b4]
    shapes = [2000, 1000, 500, 50]
    nonlinearities = [rectify, rectify, rectify, linear]
    return weights, biases, shapes, nonlinearities
Project: ip-avsr    Author: lzuwei    | Project source | File source
def load_dbn(path='models/oulu_ae.mat'):
    """
    load a pretrained dbn from path
    :param path: path to the .mat dbn
    :return: pretrained deep belief network
    """
    # create the network using weights from pretrain_nn.mat
    nn = sio.loadmat(path)
    w1 = nn['w1']
    w2 = nn['w2']
    w3 = nn['w3']
    w4 = nn['w4']
    b1 = nn['b1'][0]
    b2 = nn['b2'][0]
    b3 = nn['b3'][0]
    b4 = nn['b4'][0]

    weights = [w1, w2, w3, w4]
    biases = [b1, b2, b3, b4]
    nonlinearities = [rectify, rectify, rectify, linear]
    shapes = [2000, 1000, 500, 50]
    return weights, biases, shapes, nonlinearities
Project: ip-avsr    Author: lzuwei    | Project source | File source
def test_load_params(self):
        window = T.iscalar('theta')
        inputs1 = T.tensor3('inputs1', dtype='float32')
        mask = T.matrix('mask', dtype='uint8')
        network = deltanet_majority_vote.load_saved_model('../oulu/results/best_models/1stream_mfcc_w3s3.6.pkl',
                                                          ([500, 200, 100, 50], [rectify, rectify, rectify, linear]),
                                                          (None, None, 91), inputs1, (None, None), mask,
                                                          250, window, 10)
        d = deltanet_majority_vote.extract_encoder_weights(network, ['fc1', 'fc2', 'fc3', 'bottleneck'],
                                                           [('w1', 'b1'), ('w2', 'b2'), ('w3', 'b3'), ('w4', 'b4')])
        b = deltanet_majority_vote.extract_lstm_weights(network, ['f_blstm1', 'b_blstm1'],
                                                        ['flstm', 'blstm'])
        expected_keys = ['w1', 'w2', 'w3', 'w4', 'b1', 'b2', 'b3', 'b4']
        keys = d.keys()
        for k in keys:
            assert k in expected_keys
            assert type(d[k]) == np.ndarray
        save_mat(d, '../oulu/models/oulu_1stream_mfcc_w3s3.mat')
Project: ip-avsr    Author: lzuwei    | Project source | File source
def load_dbn(path='models/oulu_ae.mat'):
    """
    load a pretrained dbn from path
    :param path: path to the .mat dbn
    :return: pretrained deep belief network
    """
    # create the network using weights from pretrain_nn.mat
    nn = sio.loadmat(path)
    w1 = nn['w1']
    w2 = nn['w2']
    w3 = nn['w3']
    w4 = nn['w4']
    b1 = nn['b1'][0]
    b2 = nn['b2'][0]
    b3 = nn['b3'][0]
    b4 = nn['b4'][0]
    weights = [w1, w2, w3, w4]
    biases = [b1, b2, b3, b4]
    shapes = [2000, 1000, 500, 50]
    nonlinearities = [rectify, rectify, rectify, linear]
    return weights, biases, shapes, nonlinearities
Project: ip-avsr    Author: lzuwei    | Project source | File source
def extract_weights(ae):
    weights = []
    biases = []
    shapes = [2000, 1000, 500, 50]
    nonlinearities = [rectify, rectify, rectify, linear]
    ae_layers = ae.get_all_layers()
    weights.append(ae_layers[1].W.astype('float32'))
    weights.append(ae_layers[2].W.astype('float32'))
    weights.append(ae_layers[3].W.astype('float32'))
    weights.append(ae_layers[4].W.astype('float32'))
    biases.append(ae_layers[1].b.astype('float32'))
    biases.append(ae_layers[2].b.astype('float32'))
    biases.append(ae_layers[3].b.astype('float32'))
    biases.append(ae_layers[4].b.astype('float32'))

    return weights, biases, shapes, nonlinearities
Project: ip-avsr    Author: lzuwei    | Project source | File source
def extract_weights(ae):
    weights = []
    biases = []
    shapes = [2000, 1000, 500, 50]
    nonlinearities = [rectify, rectify, rectify, linear]
    ae_layers = ae.get_all_layers()
    weights.append(ae_layers[1].W.astype('float32'))
    weights.append(ae_layers[2].W.astype('float32'))
    weights.append(ae_layers[3].W.astype('float32'))
    weights.append(ae_layers[4].W.astype('float32'))
    biases.append(ae_layers[1].b.astype('float32'))
    biases.append(ae_layers[2].b.astype('float32'))
    biases.append(ae_layers[3].b.astype('float32'))
    biases.append(ae_layers[4].b.astype('float32'))

    return weights, biases, shapes, nonlinearities
Project: 2WayNet    Author: aviveise    | Project source | File source
def __init__(self, incoming, num_units,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, name=None, **kwargs):
        """
        An extension of a regular dense layer that enables weight sharing between two tied hidden layers. To tie
        two layers, the first should be initialized with an initialization function for the weights, and the other
        should receive the weight matrix of the first as input.
        :param incoming: the input layer of this layer
        :param num_units: output size
        :param W: weight initialization, can be an initialization function or a given matrix
        :param b: bias initialization
        :param nonlinearity: nonlinearity function
        :param name: string
        :param kwargs:
        """
        super(TiedDenseLayer, self).__init__(incoming, num_units, W, b, nonlinearity, name=name)

        if not isinstance(W, lasagne.init.Initializer):
            self.params[self.W].remove('trainable')
            self.params[self.W].remove('regularizable')

        if self.b and not isinstance(b, lasagne.init.Initializer):
            self.params[self.b].remove('trainable')
Project: dsb3    Author: EliasVansteenkiste    | Project source | File source
def __init__(self, incoming, nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(NonlinearityLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)
Project: Neural-Photo-Editor    Author: ajbrock    | Project source | File source
def InceptionUpscaleLayer(incoming,param_dict,block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i,dict in enumerate(param_dict):
        for j,style in enumerate(dict['style']): # Loop up branch
            branch[i] = TC2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                crop = dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(
                    incoming = lasagne.layers.dnn.Pool2DDNNLayer(
                        incoming = lasagne.layers.Upscale2DLayer(
                            incoming=incoming if j == 0 else branch[i],
                            scale_factor = dict['stride'][j]),
                        pool_size = dict['filter_size'][j],
                        stride = [1,1],
                        mode = dict['mode'][j],
                        pad = dict['pad'][j],
                        name = block_name+'_'+str(i)+'_'+str(j)),
                    nonlinearity = dict['nonlinearity'][j])
                # Apply Batchnorm    
            branch[i] = BN(branch[i],name = block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
        # Concatenate Sublayers        

    return CL(incomings=branch,name=block_name)

# Convenience function to efficiently generate param dictionaries for use with InceptionLayer
Project: Neural-Photo-Editor    Author: ajbrock    | Project source | File source
def pd(num_layers=2,num_filters=32,filter_size=(3,3),pad=1,stride = (1,1),nonlinearity=elu,style='convolutional',bnorm=1,**kwargs):
    input_args = locals()    
    input_args.pop('num_layers')
    return {key:entry if type(entry) is list else [entry]*num_layers for key,entry in input_args.iteritems()}  

# Possible Conv2DDNN convenience function. Remember to delete the C2D import at the top if you use this    
# def C2D(incoming = None, num_filters = 32, filter_size= [3,3],pad = 'same',stride = [1,1], W = initmethod('relu'),nonlinearity = elu,name = None):
    # return lasagne.layers.dnn.Conv2DDNNLayer(incoming,num_filters,filter_size,stride,pad,False,W,None,nonlinearity,False)

# Shape-Preserving Gaussian Sample layer for latent vectors with spatial dimensions.
# This is a holdover from an "old" (i.e. I abandoned it last month) idea.
Project: nn-patterns    Author: pikinder    | Project source | File source
def has_ReLU(layer):
    relus = [lasagne.nonlinearities.rectify, T.nnet.relu]
    return (hasattr(layer, 'nonlinearity') and
            layer.nonlinearity in relus)
Project: nn-patterns    Author: pikinder    | Project source | File source
def get_rectifier_layer(input_layer, rectifier_layer):
    if has_ReLU(rectifier_layer):
        return lasagne.layers.NonlinearityLayer(input_layer,
                                                nonlinearity=rectify)
    return input_layer
Project: melanoma-transfer    Author: learningtitans    | Project source | File source
def conv_params(num_filters, filter_size=(3, 3), stride=(1, 1), border_mode='same',
         nonlinearity=rectify, W=init.Orthogonal(gain=1.0),
         b=init.Constant(0.05), untie_biases=False, **kwargs):
    args = {
        'num_filters': num_filters,
        'filter_size': filter_size,
        'stride': stride,
        'pad': border_mode,         # The new version has 'pad' instead of 'border_mode'
        'nonlinearity': nonlinearity,
        'W': W,
        'b': b,
        'untie_biases': untie_biases,
    }
    args.update(kwargs)
    return args
Project: melanoma-transfer    Author: learningtitans    | Project source | File source
def dense_params(num_units, nonlinearity=rectify, **kwargs):
    args = {
        'num_units': num_units,
        'nonlinearity': nonlinearity,
        'W': init.Orthogonal(1.0),
        'b': init.Constant(0.05),
    }
    args.update(kwargs)
    return args
Project: LasagneNLP    Author: XuezheMax    | Project source | File source
def build_BiLSTM_HighCNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
                         peepholes=False, num_filters=20, dropout=True, in_to_out=False):
    # first get some necessary dimensions or parameters
    conv_window = 3
    _, sent_length, _ = incoming2.output_shape

    # dropout before cnn
    if dropout:
        incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)

    # construct convolution layer
    cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                           nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (pool size should go through all time step of cnn)
    _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
    # reshape the layer to match highway incoming layer [batch * sent_length, num_filters, 1] --> [batch * sent_length, num_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, ([0], -1))

    # dropout after cnn?
    # if dropout:
    # output_cnn_layer = lasagne.layers.DropoutLayer(output_cnn_layer, p=0.5)

    # construct highway layer
    highway_layer = HighwayDenseLayer(output_cnn_layer, nonlinearity=nonlinearities.rectify)

    # reshape the layer to match lstm incoming layer [batch * sent_length, num_filters] --> [batch, sent_length, number_filters]
    output_highway_layer = lasagne.layers.reshape(highway_layer, (-1, sent_length, [1]))

    # finally, concatenate the two incoming layers together.
    incoming = lasagne.layers.concat([output_highway_layer, incoming2], axis=2)

    return build_BiLSTM(incoming, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
                        precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
Project: drmad    Author: bigaidream-projects    | Project source | File source
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 num_leading_axes=1, **kwargs):
        super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        if num_leading_axes >= len(self.input_shape):
            raise ValueError(
                    "Got num_leading_axes=%d for a %d-dimensional input, "
                    "leaving no trailing axes for the dot product." %
                    (num_leading_axes, len(self.input_shape)))
        elif num_leading_axes < -len(self.input_shape):
            raise ValueError(
                    "Got num_leading_axes=%d for a %d-dimensional input, "
                    "requesting more trailing axes than there are input "
                    "dimensions." % (num_leading_axes, len(self.input_shape)))
        self.num_leading_axes = num_leading_axes

        if any(s is None for s in self.input_shape[num_leading_axes:]):
            raise ValueError(
                    "A DenseLayer requires a fixed input shape (except for "
                    "the leading axes). Got %r for num_leading_axes=%d." %
                    (self.input_shape, self.num_leading_axes))
        num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name="b",
                                    regularizable=False)

        if args.regL1 is True:
            self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                     (num_inputs, num_units), name="L1")
        if args.regL2 is True:
            self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                     (num_inputs, num_units), name="L2")
Project: CIKM2017    Author: MovieFIB    | Project source | File source
def __init__(
        self, incoming, num_units,
        W=init.Constant(0.1),
        b=init.Constant(0.),
        nonlinearity=nonlinearities.rectify,
        **kwargs
    ):
        super(Tensor4LinearLayer, self).__init__(incoming, **kwargs)
        num_inputs = self.input_shape[-1]
        self.num_units = num_units
        self.W = self.add_param(
            W, (num_inputs, num_units),
            name="W"
        )
        if b:
            self.b = self.add_param(
                b,
                (
                    self.input_shape[1],
                    self.input_shape[2], self.num_units
                )
            )
        else:
            self.b = None
        if nonlinearity:
            self.nonlinearity = nonlinearity
        else:
            self.nonlinearity = nonlinearities.identity
Project: CIKM2017    Author: MovieFIB    | Project source | File source
def __init__(
        self, incoming, num_units,
        W=init.Constant(0.1),
        b=init.Constant(0.),
        nonlinearity=nonlinearities.rectify,
        **kwargs
    ):
        super(Tensor3LinearLayer, self).__init__(incoming, **kwargs)
        num_inputs = self.input_shape[-1]
        self.num_units = num_units
        self.W = self.add_param(
            W, (num_inputs, num_units),
            name="W"
        )
        if b:
            self.b = self.add_param(
                b,
                (
                    self.input_shape[1], self.num_units
                )
            )
        else:
            self.b = None
        if nonlinearity:
            self.nonlinearity = nonlinearity
        else:
            self.nonlinearity = nonlinearities.identity
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | Project source | File source
def __build_12_net__(self):

        network = layers.InputLayer((None, 3, 12, 12), input_var=self.__input_var__)
        network = layers.dropout(network, p=0.1)
        network = layers.Conv2DLayer(network,num_filters=16,filter_size=(3,3),stride=1,nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
        network = layers.DropoutLayer(network,p=0.3)        
        network = layers.DenseLayer(network,num_units = 16,nonlinearity = relu)
        network = layers.batch_norm(network)
        network = layers.DropoutLayer(network,p=0.3)
        network = layers.DenseLayer(network,num_units = 2, nonlinearity = softmax)
        return network
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | Project source | File source
def __build_24_net__(self):

        network = layers.InputLayer((None, 3, 24, 24), input_var=self.__input_var__)
        network = layers.dropout(network, p=0.1)
        network = layers.Conv2DLayer(network,num_filters=64,filter_size=(5,5),stride=1,nonlinearity=relu)
        network = layers.batch_norm(network)
        network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
        network = layers.DropoutLayer(network,p=0.5)
        network = layers.batch_norm(network)
        network = layers.DenseLayer(network,num_units = 64,nonlinearity = relu)
        network = layers.DropoutLayer(network,p=0.5)
        network = layers.DenseLayer(network,num_units = 2, nonlinearity = softmax)
        return network
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | Project source | File source
def __build_12_calib_net__(self):
        network = layers.InputLayer((None, 3, 12, 12), input_var=self.__input_var__)
        network = layers.Conv2DLayer(network,num_filters=16,filter_size=(3,3),stride=1,nonlinearity=relu)
        network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
        network = layers.DenseLayer(network,num_units = 128,nonlinearity = relu)
        network = layers.DenseLayer(network,num_units = 45, nonlinearity = softmax)
        return network
Project: Cascade-CNN-Face-Detection    Author: gogolgrind    | Project source | File source
def __build_24_calib_net__(self):
        network = layers.InputLayer((None, 3, 24, 24), input_var=self.__input_var__)
        network = layers.Conv2DLayer(network,num_filters=32,filter_size=(5,5),stride=1,nonlinearity=relu)
        network = layers.MaxPool2DLayer(network, pool_size = (3,3),stride = 2)
        network = layers.DenseLayer(network,num_units = 64,nonlinearity = relu)
        network = layers.DenseLayer(network,num_units = 45, nonlinearity = softmax)
        return network
Project: gogh-figure    Author: joelmoniz    | Project source | File source
def style_conv_block(conv_in, num_styles, num_filters, filter_size, stride, nonlinearity=rectify, normalization=instance_norm):
    sc_network = ReflectLayer(conv_in, filter_size//2)
    sc_network = normalization(ConvLayer(sc_network, num_filters, filter_size, stride, nonlinearity=nonlinearity, W=Normal()), num_styles=num_styles)
    return sc_network
Project: adda_mnist64    Author: davidtellez    | Project source | File source
def network_discriminator(self, features):

        network = {}
        network['discriminator/conv2'] = Conv2DLayer(features, num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv2')
        network['discriminator/pool2'] = MaxPool2DLayer(network['discriminator/conv2'], pool_size=2, stride=2, pad=0, name='discriminator/pool2')
        network['discriminator/conv3'] = Conv2DLayer(network['discriminator/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv3')
        network['discriminator/pool3'] = MaxPool2DLayer(network['discriminator/conv3'], pool_size=2, stride=2, pad=0, name='discriminator/pool3')
        network['discriminator/conv4'] = Conv2DLayer(network['discriminator/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv4')
        network['discriminator/pool4'] = MaxPool2DLayer(network['discriminator/conv4'], pool_size=2, stride=2, pad=0, name='discriminator/pool4')
        network['discriminator/dense1'] = DenseLayer(network['discriminator/pool4'], num_units=64, nonlinearity=rectify, name='discriminator/dense1')
        network['discriminator/output'] = DenseLayer(network['discriminator/dense1'], num_units=2, nonlinearity=softmax, name='discriminator/output')

        return network
Project: dqn_vizdoom_theano    Author: mihahauke    | Project source | File source
def _initialize_network(self, img_input_shape, misc_len, output_size, img_input, misc_input=None, **kwargs):

        input_layers = []
        inputs = [img_input]
        # weights_init = lasagne.init.GlorotUniform("relu")
        weights_init = lasagne.init.HeNormal("relu")

        network = ls.InputLayer(shape=img_input_shape, input_var=img_input)
        input_layers.append(network)
        network = ls.Conv2DLayer(network, num_filters=32, filter_size=8, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=4)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=4, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=2)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=3, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=1)

        if self.misc_state_included:
            inputs.append(misc_input)
            network = ls.FlattenLayer(network)
            misc_input_layer = ls.InputLayer(shape=(None, misc_len), input_var=misc_input)
            input_layers.append(misc_input_layer)
            if "additional_misc_layer" in kwargs:
                misc_input_layer = ls.DenseLayer(misc_input_layer, int(kwargs["additional_misc_layer"]),
                                                 nonlinearity=rectify,
                                                 W=weights_init, b=lasagne.init.Constant(0.1))

            network = ls.ConcatLayer([network, misc_input_layer])

        network = ls.DenseLayer(network, 512, nonlinearity=rectify,
                                W=weights_init, b=lasagne.init.Constant(0.1))

        network = ls.DenseLayer(network, output_size, nonlinearity=None, b=lasagne.init.Constant(.1))
        return network, input_layers, inputs
Project: dqn_vizdoom_theano    Author: mihahauke    | Project source | File source
def _initialize_network(self, img_input_shape, misc_len, output_size, img_input, misc_input=None, **kwargs):
        input_layers = []
        inputs = [img_input]
        # weights_init = lasagne.init.GlorotUniform("relu")
        weights_init = lasagne.init.HeNormal("relu")

        network = ls.InputLayer(shape=img_input_shape, input_var=img_input)
        input_layers.append(network)
        network = ls.Conv2DLayer(network, num_filters=32, filter_size=8, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(.1), stride=4)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=4, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(.1), stride=2)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=3, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(.1), stride=1)

        if self.misc_state_included:
            inputs.append(misc_input)
            network = ls.FlattenLayer(network)
            misc_input_layer = ls.InputLayer(shape=(None, misc_len), input_var=misc_input)
            input_layers.append(misc_input_layer)
            if "additional_misc_layer" in kwargs:
                misc_input_layer = ls.DenseLayer(misc_input_layer, int(kwargs["additional_misc_layer"]),
                                                 nonlinearity=rectify,
                                                 W=weights_init, b=lasagne.init.Constant(0.1))
            network = ls.ConcatLayer([network, misc_input_layer])

        # Duelling here

        advanteges_branch = ls.DenseLayer(network, 256, nonlinearity=rectify,
                                          W=weights_init, b=lasagne.init.Constant(.1))
        advanteges_branch = ls.DenseLayer(advanteges_branch, output_size, nonlinearity=None,
                                          b=lasagne.init.Constant(.1))

        state_value_branch = ls.DenseLayer(network, 256, nonlinearity=rectify,
                                           W=weights_init, b=lasagne.init.Constant(.1))
        state_value_branch = ls.DenseLayer(state_value_branch, 1, nonlinearity=None,
                                           b=lasagne.init.Constant(.1))

        network = DuellingMergeLayer([advanteges_branch, state_value_branch])
        return network, input_layers, inputs
Project: dqn_vizdoom_theano    Author: mihahauke    | Project source | File source
def _initialize_network(self, img_input_shape, misc_len, output_size, img_input, misc_input=None, **kwargs):
        input_layers = []
        inputs = [img_input]
        # weights_init = lasagne.init.GlorotUniform("relu")
        weights_init = lasagne.init.HeNormal("relu")

        network = ls.InputLayer(shape=img_input_shape, input_var=img_input)
        input_layers.append(network)
        network = ls.Conv2DLayer(network, num_filters=32, filter_size=8, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=4)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=4, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=2)
        network = ls.Conv2DLayer(network, num_filters=64, filter_size=3, nonlinearity=rectify, W=weights_init,
                                 b=lasagne.init.Constant(0.1), stride=1)
        network = ls.FlattenLayer(network)

        if self.misc_state_included:
            health_inputs = 4
            units_per_health_input = 100
            layers_for_merge = []
            for i in range(health_inputs):
                oh_input = lasagne.utils.one_hot(misc_input[:, i] - 1, units_per_health_input)
                health_input_layer = ls.InputLayer(shape=(None, units_per_health_input), input_var=oh_input)
                inputs.append(oh_input)
                input_layers.append(health_input_layer)
                layers_for_merge.append(health_input_layer)

            misc_input_layer = ls.InputLayer(shape=(None, misc_len - health_inputs),
                                             input_var=misc_input[:, health_inputs:])
            input_layers.append(misc_input_layer)
            layers_for_merge.append(misc_input_layer)
            inputs.append(misc_input[:, health_inputs:])

            layers_for_merge.append(network)
            network = ls.ConcatLayer(layers_for_merge)

        network = ls.DenseLayer(network, 512, nonlinearity=rectify,
                                W=weights_init, b=lasagne.init.Constant(0.1))

        network = ls.DenseLayer(network, output_size, nonlinearity=None, b=lasagne.init.Constant(.1))
        return network, input_layers, inputs
Project: bmvc16_face    Author: stephenjia    | Project source | File source
def build_model(self, img_batch, pose_code):        

        img_size = self.options['img_size']
        pose_code_size = self.options['pose_code_size']                        
        filter_size = self.options['filter_size']        
        batch_size = img_batch.shape[0]

        # image encoding        
        l_in = InputLayer(shape = [None, img_size[0], img_size[1], img_size[2]], input_var=img_batch)
        l_in_dimshuffle = DimshuffleLayer(l_in, (0,3,1,2))

        l_conv1_1 = Conv2DLayer(l_in_dimshuffle, num_filters=64, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))        
        l_conv1_2 = Conv2DLayer(l_conv1_1, num_filters=64, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_pool1 = MaxPool2DLayer(l_conv1_2, pool_size=(2,2)) 

        # pose encoding
        l_in_2 = InputLayer(shape=(None, pose_code_size), input_var=pose_code)     
        l_pose_1 = DenseLayer(l_in_2, num_units=512, W=HeNormal(),nonlinearity=rectify)
        l_pose_2 = DenseLayer(l_pose_1, num_units=pose_code_size*l_pool1.output_shape[2]*l_pool1.output_shape[3], W=HeNormal(),nonlinearity=rectify)
        l_pose_reshape = ReshapeLayer(l_pose_2, shape=(batch_size, pose_code_size, l_pool1.output_shape[2], l_pool1.output_shape[3])) 

        # deeper fusion
        l_concat = ConcatLayer([l_pool1, l_pose_reshape], axis=1)
        l_pose_conv_1 = Conv2DLayer(l_concat, num_filters=128, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2)) 
        l_pose_conv_2 = Conv2DLayer(l_pose_conv_1, num_filters=128, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))

        l_pool2 = MaxPool2DLayer(l_pose_conv_2, pool_size=(2,2))         
        l_conv_3 = Conv2DLayer(l_pool2, num_filters=128, filter_size=(1,1), W=HeNormal()) 
        l_unpool1 = Unpool2DLayer(l_conv_3, ds = (2,2))

        # image decoding
        l_deconv_conv1_1 = Conv2DLayer(l_unpool1, num_filters=128, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_deconv_conv1_2 = Conv2DLayer(l_deconv_conv1_1, num_filters=64, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))

        l_unpool2 = Unpool2DLayer(l_deconv_conv1_2, ds = (2,2))
        l_deconv_conv2_1 = Conv2DLayer(l_unpool2, num_filters=64, filter_size=filter_size, nonlinearity=None, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_deconv_conv2_2 = Conv2DLayer(l_deconv_conv2_1, num_filters=img_size[2], filter_size=filter_size, nonlinearity=None, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))  

        return l_deconv_conv2_2, l_pose_reshape
Project: bmvc16_face    Author: stephenjia    | Project source | File source
def build_model(self, img_batch, img_batch_gen):

        img_size = self.options['img_size']
        pose_code_size = self.options['pose_code_size']                        
        filter_size = self.options['filter_size']        
        batch_size = img_batch.shape[0]

        # image encoding               
        l_in_1 = InputLayer(shape = [None, img_size[0], img_size[1], img_size[2]], input_var=img_batch)
        l_in_1_dimshuffle = DimshuffleLayer(l_in_1, (0,3,1,2))        
        l_in_2 = InputLayer(shape = [None, img_size[0], img_size[1], img_size[2]], input_var=img_batch_gen)
        l_in_2_dimshuffle = DimshuffleLayer(l_in_2, (0,3,1,2)) 
        l_in_concat = ConcatLayer([l_in_1_dimshuffle, l_in_2_dimshuffle], axis=1)                         

        l_conv1_1 = Conv2DLayer(l_in_concat, num_filters=64, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))        
        l_conv1_2 = Conv2DLayer(l_conv1_1, num_filters=64, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_pool1 = MaxPool2DLayer(l_conv1_2, pool_size=(2,2)) 

        l_conv2_1 = Conv2DLayer(l_pool1, num_filters=128, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_conv2_2 = Conv2DLayer(l_conv2_1, num_filters=128, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))

        l_pool2 = MaxPool2DLayer(l_conv2_2, pool_size=(2,2))         
        l_conv_3 = Conv2DLayer(l_pool2, num_filters=128, filter_size=(1,1), W=HeNormal())
        l_unpool1 = Unpool2DLayer(l_conv_3, ds = (2,2))        

        # image decoding
        l_deconv_conv1_1 = Conv2DLayer(l_unpool1, num_filters=128, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_deconv_conv1_2 = Conv2DLayer(l_deconv_conv1_1, num_filters=64, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))

        l_unpool2 = Unpool2DLayer(l_deconv_conv1_2, ds = (2,2))
        l_deconv_conv2_1 = Conv2DLayer(l_unpool2, num_filters=64, filter_size=filter_size, nonlinearity=None, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_deconv_conv2_2 = Conv2DLayer(l_deconv_conv2_1, num_filters=img_size[2], filter_size=filter_size, nonlinearity=None, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))  

        return l_deconv_conv2_2
Project: bmvc16_face    Author: stephenjia    | Project source | File source
def build_model(self, img_batch, pose_code):        

        img_size = self.options['img_size']
        pose_code_size = self.options['pose_code_size']                        
        filter_size = self.options['filter_size']        
        batch_size = img_batch.shape[0]

        # image encoding        
        l_in = InputLayer(shape = [None, img_size[0], img_size[1], img_size[2]], input_var=img_batch)
        l_in_dimshuffle = DimshuffleLayer(l_in, (0,3,1,2))

        l_conv1_1 = Conv2DLayer(l_in_dimshuffle, num_filters=64, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))        
        l_conv1_2 = Conv2DLayer(l_conv1_1, num_filters=64, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_pool1 = MaxPool2DLayer(l_conv1_2, pool_size=(2,2)) 

        # pose encoding
        l_in_2 = InputLayer(shape=(None, pose_code_size), input_var=pose_code)     
        l_pose_1 = DenseLayer(l_in_2, num_units=512, W=HeNormal(),nonlinearity=rectify)
        l_pose_2 = DenseLayer(l_pose_1, num_units=pose_code_size*l_pool1.output_shape[2]*l_pool1.output_shape[3], W=HeNormal(),nonlinearity=rectify)
        l_pose_reshape = ReshapeLayer(l_pose_2, shape=(batch_size, pose_code_size, l_pool1.output_shape[2], l_pool1.output_shape[3])) 

        # deeper fusion
        l_concat = ConcatLayer([l_pool1, l_pose_reshape], axis=1)
        l_pose_conv_1 = Conv2DLayer(l_concat, num_filters=128, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2)) 
        l_pose_conv_2 = Conv2DLayer(l_pose_conv_1, num_filters=128, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))

        l_pool2 = MaxPool2DLayer(l_pose_conv_2, pool_size=(2,2))         
        l_conv_3 = Conv2DLayer(l_pool2, num_filters=128, filter_size=(1,1), W=HeNormal()) 
        l_unpool1 = Unpool2DLayer(l_conv_3, ds = (2,2))

        # image decoding
        l_deconv_conv1_1 = Conv2DLayer(l_unpool1, num_filters=128, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_deconv_conv1_2 = Conv2DLayer(l_deconv_conv1_1, num_filters=64, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))

        l_unpool2 = Unpool2DLayer(l_deconv_conv1_2, ds = (2,2))
        l_deconv_conv2_1 = Conv2DLayer(l_unpool2, num_filters=64, filter_size=filter_size, nonlinearity=None, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_deconv_conv2_2 = Conv2DLayer(l_deconv_conv2_1, num_filters=img_size[2], filter_size=filter_size, nonlinearity=None, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))  

        return l_deconv_conv2_2, l_pose_reshape
Project: bmvc16_face    Author: stephenjia    | Project source | File source
def build_model(self, img_batch, img_batch_gen):

        img_size = self.options['img_size']
        pose_code_size = self.options['pose_code_size']                        
        filter_size = self.options['filter_size']        
        batch_size = img_batch.shape[0]

        # image encoding               
        l_in_1 = InputLayer(shape = [None, img_size[0], img_size[1], img_size[2]], input_var=img_batch)
        l_in_1_dimshuffle = DimshuffleLayer(l_in_1, (0,3,1,2))        
        l_in_2 = InputLayer(shape = [None, img_size[0], img_size[1], img_size[2]], input_var=img_batch_gen)
        l_in_2_dimshuffle = DimshuffleLayer(l_in_2, (0,3,1,2)) 
        l_in_concat = ConcatLayer([l_in_1_dimshuffle, l_in_2_dimshuffle], axis=1)                         

        l_conv1_1 = Conv2DLayer(l_in_concat, num_filters=64, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))        
        l_conv1_2 = Conv2DLayer(l_conv1_1, num_filters=64, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_pool1 = MaxPool2DLayer(l_conv1_2, pool_size=(2,2)) 

        l_conv2_1 = Conv2DLayer(l_pool1, num_filters=128, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_conv2_2 = Conv2DLayer(l_conv2_1, num_filters=128, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))

        l_pool2 = MaxPool2DLayer(l_conv2_2, pool_size=(2,2))         
        l_conv_3 = Conv2DLayer(l_pool2, num_filters=128, filter_size=(1,1), W=HeNormal())
        l_unpool1 = Unpool2DLayer(l_conv_3, ds = (2,2))        

        # image decoding
        l_deconv_conv1_1 = Conv2DLayer(l_unpool1, num_filters=128, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_deconv_conv1_2 = Conv2DLayer(l_deconv_conv1_1, num_filters=64, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))

        l_unpool2 = Unpool2DLayer(l_deconv_conv1_2, ds = (2,2))
        l_deconv_conv2_1 = Conv2DLayer(l_unpool2, num_filters=64, filter_size=filter_size, nonlinearity=None, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_deconv_conv2_2 = Conv2DLayer(l_deconv_conv2_1, num_filters=img_size[2], filter_size=filter_size, nonlinearity=None, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))  

        return l_deconv_conv2_2
Project: bmvc16_face    Author: stephenjia    | Project source | File source
def build_model(self, img_batch, pose_code):        

        img_size = self.options['img_size']
        pose_code_size = self.options['pose_code_size']                        
        filter_size = self.options['filter_size']        
        batch_size = img_batch.shape[0]

        # image encoding        
        l_in = InputLayer(shape = [None, img_size[0], img_size[1], img_size[2]], input_var=img_batch)
        l_in_dimshuffle = DimshuffleLayer(l_in, (0,3,1,2))

        l_conv1_1 = Conv2DLayer(l_in_dimshuffle, num_filters=64, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))        
        l_conv1_2 = Conv2DLayer(l_conv1_1, num_filters=64, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_pool1 = MaxPool2DLayer(l_conv1_2, pool_size=(2,2)) 

        # pose encoding
        l_in_2 = InputLayer(shape=(None, pose_code_size), input_var=pose_code)     
        l_pose_1 = DenseLayer(l_in_2, num_units=512, W=HeNormal(),nonlinearity=rectify)
        l_pose_2 = DenseLayer(l_pose_1, num_units=pose_code_size*l_pool1.output_shape[2]*l_pool1.output_shape[3], W=HeNormal(),nonlinearity=rectify)
        l_pose_reshape = ReshapeLayer(l_pose_2, shape=(batch_size, pose_code_size, l_pool1.output_shape[2], l_pool1.output_shape[3])) 

        # deeper fusion
        l_concat = ConcatLayer([l_pool1, l_pose_reshape], axis=1)
        l_pose_conv_1 = Conv2DLayer(l_concat, num_filters=128, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2)) 
        l_pose_conv_2 = Conv2DLayer(l_pose_conv_1, num_filters=128, filter_size=filter_size, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))

        l_pool2 = MaxPool2DLayer(l_pose_conv_2, pool_size=(2,2))         
        l_conv_3 = Conv2DLayer(l_pool2, num_filters=128, filter_size=(1,1), W=HeNormal()) 
        l_unpool1 = Unpool2DLayer(l_conv_3, ds = (2,2))

        # image decoding
        l_deconv_conv1_1 = Conv2DLayer(l_unpool1, num_filters=128, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_deconv_conv1_2 = Conv2DLayer(l_deconv_conv1_1, num_filters=64, filter_size=filter_size, nonlinearity=rectify,W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))

        l_unpool2 = Unpool2DLayer(l_deconv_conv1_2, ds = (2,2))
        l_deconv_conv2_1 = Conv2DLayer(l_unpool2, num_filters=64, filter_size=filter_size, nonlinearity=None, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))
        l_deconv_conv2_2 = Conv2DLayer(l_deconv_conv2_1, num_filters=img_size[2], filter_size=filter_size, nonlinearity=None, W=HeNormal(), pad=(filter_size[0]//2, filter_size[1]//2))  

        return l_deconv_conv2_2, l_pose_reshape
Project: AttentionNet    Author: sayvazov    | Project source | File source
def build_emission_network(r2):
    if not isinstance(r2, lasagne.layers.Layer):
        l_in = lasagne.layers.InputLayer((None, glimpse_output_size, recurrent_output_size), r2)
    else:
        l_in = r2
    output = lasagne.layers.DenseLayer(l_in, 2, nonlinearity=nl.rectify, 
                                       W = emission_weights, b = emission_bias)
    return output

#input is r1 of length glimpse_output_size
#output is labels of length classification_units
Project: AttentionNet    Author: sayvazov    | Project source | File source
def build_context_network(downsample):
    if not isinstance(downsample, lasagne.layers.Layer):
        l_in = lasagne.layers.InputLayer((None, 1, downsample_rows, downsample_cols), downsample)
    else:
        l_in = downsample
    first_conv = lasagne.layers.Conv2DLayer(l_in,
                                             context_number_of_convolving_filters,
                                             context_convolving_filter_size, 
                                             stride = 1,
                                             pad = 'same',
                                             nonlinearity = nl.rectify)
    first_pool = lasagne.layers.MaxPool2DLayer(first_conv, context_pool_rate)
    second_conv = lasagne.layers.Conv2DLayer(first_pool,
                                             context_number_of_convolving_filters,
                                             context_convolving_filter_size, 
                                             stride = 1,
                                             pad = 'same',
                                             nonlinearity = nl.rectify)
    second_pool = lasagne.layers.MaxPool2DLayer(second_conv, context_pool_rate)
    third_conv = lasagne.layers.Conv2DLayer(second_pool,
                                             context_number_of_convolving_filters,
                                             context_convolving_filter_size, 
                                             stride = 1,
                                             pad = 'same',
                                             nonlinearity = nl.rectify)
    third_pool = lasagne.layers.MaxPool2DLayer(third_conv, context_pool_rate)
    fc = lasagne.layers.DenseLayer(third_pool, 
                                       glimpse_output_size*recurrent_output_size, 
                                       nonlinearity = nl.rectify)
    output = lasagne.layers.ReshapeLayer(fc, (-1, glimpse_output_size, recurrent_output_size))
    return output
Project: RL4Data    Author: fyabc    | Project source | File source
def build_cnn(self, input_var=None):
        # Building the network
        layer_in = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)

        # Conv1
        # [NOTE]: normal vs. truncated normal?
        # [NOTE]: conv in lasagne is not same as it in TensorFlow.
        layer = ConvLayer(layer_in, num_filters=64, filter_size=(3, 3), stride=(1, 1), nonlinearity=rectify,
                          pad='same', W=lasagne.init.HeNormal(), flip_filters=False)
        # Pool1
        layer = MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2))
        # Norm1
        layer = LocalResponseNormalization2DLayer(layer, alpha=0.001 / 9.0, k=1.0, beta=0.75)

        # Conv2
        layer = ConvLayer(layer, num_filters=64, filter_size=(5, 5), stride=(1, 1), nonlinearity=rectify,
                          pad='same', W=lasagne.init.HeNormal(), flip_filters=False)
        # Norm2
        # [NOTE]: n must be odd, but n in Chang's code is 4?
        layer = LocalResponseNormalization2DLayer(layer, alpha=0.001 / 9.0, k=1.0, beta=0.75)
        # Pool2
        layer = MaxPool2DLayer(layer, pool_size=(3, 3), stride=(2, 2))

        # Reshape
        layer = lasagne.layers.ReshapeLayer(layer, shape=([0], -1))

        # Dense3
        layer = DenseLayer(layer, num_units=384, W=lasagne.init.HeNormal(), b=lasagne.init.Constant(0.1))

        # Dense4
        layer = DenseLayer(layer, num_units=192, W=lasagne.init.Normal(std=0.04), b=lasagne.init.Constant(0.1))

        # Softmax
        layer = DenseLayer(layer, num_units=self.output_size,
                           W=lasagne.init.Normal(std=1. / 192.0), nonlinearity=softmax)

        return layer
Project: 2WayNet    Author: aviveise    | Project source | File source
def __init__(self, incoming,
                 gamma=init.Uniform([0.95, 1.05]),
                 beta=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 epsilon=0.001,
                 **kwargs):
        super(BatchNormalizationLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_units = int(numpy.prod(self.input_shape[1:]))
        self.gamma = self.add_param(gamma, (self.num_units,), name="BatchNormalizationLayer:gamma", regularizable=True,
                                    gamma=True, trainable=True)
        self.beta = self.add_param(beta, (self.num_units,), name="BatchNormalizationLayer:beta", regularizable=False)
        self.epsilon = epsilon

        self.mean_inference = theano.shared(
            numpy.zeros((1, self.num_units), dtype=theano.config.floatX),
            borrow=True,
            broadcastable=(True, False))
        self.mean_inference.name = "shared:mean"

        self.variance_inference = theano.shared(
            numpy.zeros((1, self.num_units), dtype=theano.config.floatX),
            borrow=True,
            broadcastable=(True, False))
        self.variance_inference.name = "shared:variance"
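
One detail worth noting is the custom gamma=True tag passed to add_param: Lasagne's tag system lets such parameters be retrieved or excluded later. A minimal, self-contained illustration of that mechanism, using a plain DenseLayer instead of the full class above:

import lasagne

l_in = lasagne.layers.InputLayer((None, 8))
l = lasagne.layers.DenseLayer(l_in, 4)
# Register an extra parameter with a custom tag, as BatchNormalizationLayer does.
gamma = l.add_param(lasagne.init.Constant(1.), (4,), name='gamma',
                    gamma=True, regularizable=True)
print(lasagne.layers.get_all_params(l, gamma=True))  # -> only the tagged parameter

Filtering with get_all_params(l, gamma=True) is how a training loop could treat the scale parameters separately from ordinary weights.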
Project: 2WayNet    Author: aviveise    | Project source | File source
def __init__(self, incoming, num_units, cell_num, W=lasagne.init.GlorotUniform(),
                 b=lasagne.init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 name=None, **kwargs):
        super(LocallyDenseLayer, self).__init__(incoming, name)
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)

        self.num_units = num_units

        num_inputs = int(np.prod(self.input_shape[1:]))
        self.cell_input_size = num_inputs // cell_num  # floor division keeps the sizes integral under Python 3
        self.cell_size = self.num_units // cell_num

        if isinstance(W, lasagne.init.Initializer):
            W = [W for i in range(0, cell_num)]

        if isinstance(b, lasagne.init.Initializer):
            b = [b for i in range(0, cell_num)]

        self._dense_layers = []
        self.W = []
        self.b = []

        # Create cell_num tied dense layers, one per cell
        for i in range(cell_num):
            self._dense_layers.append(TiedDenseLayer(CutLayer(incoming, cell_num),
                                                     self.cell_size, W[i], b[i], nonlinearity, **kwargs))

            self.W.append(self._dense_layers[-1].W)
            self.b.append(self._dense_layers[-1].b)
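
The constructor splits both the inputs and the output units evenly across cell_num cells; a quick numeric sketch of that bookkeeping, with made-up sizes:

num_inputs, num_units, cell_num = 120, 60, 4
cell_input_size = num_inputs // cell_num  # 30 inputs are routed to each cell
cell_size = num_units // cell_num         # each TiedDenseLayer produces 15 units
print(cell_input_size, cell_size)         # -> 30 15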
Project: Neural-Photo-Editor    Author: ajbrock    | Project source | File source
def MDCL(incoming,num_filters,scales,name,dnn=True):
    if dnn:
        from lasagne.layers.dnn import Conv2DDNNLayer as C2D
    else:
        from lasagne.layers import Conv2DLayer as C2D  # fallback so C2D is defined without cuDNN
    # W initialization method--this should also work as Orthogonal('relu'), but I have yet to validate that as thoroughly.
    winit = initmethod(0.02)

    # Initialization method for the coefficients
    sinit = lasagne.init.Constant(1.0/(1+len(scales)))

    # Number of incoming channels
    ni =lasagne.layers.get_output_shape(incoming)[1]

    # Weight parameter--the primary parameter for this block
    W = theano.shared(lasagne.utils.floatX(winit.sample((num_filters,lasagne.layers.get_output_shape(incoming)[1],3,3))),name=name+'W')

    # Primary Convolution Layer--No Dilation
    n = C2D(incoming = incoming,
                            num_filters = num_filters,
                            filter_size = [3,3],
                            stride = [1,1],
                            pad = (1,1),
                            W = W*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'_coeff_base').dimshuffle(0,'x','x','x'), # Note the broadcasting dimshuffle for the num_filter scalars.
                            b = None,
                            nonlinearity = None,
                            name = name+'base'
                        )
    # List of remaining layers. This should probably just all be concatenated into a single list rather than being a separate deal.
    nd = []    
    for i,scale in enumerate(scales):

        # I don't think 0 dilation is technically defined (or if it is it's just the regular filter) but I use it here as a convenient keyword to grab the 1x1 mean conv.
        if scale==0:
            nd.append(C2D(incoming = incoming,
                            num_filters = num_filters,
                            filter_size = [1,1],
                            stride = [1,1],
                            pad = (0,0),
                            W = T.mean(W,axis=[2,3]).dimshuffle(0,1,'x','x')*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'_coeff_1x1').dimshuffle(0,'x','x','x'),
                            b = None,
                            nonlinearity = None,
                            name = name+str(scale)))
        # Note the dimshuffles in this layer--these are critical as the current DilatedConv2D implementation uses a backward pass.
        else:
            nd.append(lasagne.layers.DilatedConv2DLayer(incoming = lasagne.layers.PadLayer(incoming = incoming, width=(scale,scale)),
                                num_filters = num_filters,
                                filter_size = [3,3],
                                dilation=(scale,scale),
                                W = W.dimshuffle(1,0,2,3)*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'_coeff_'+str(scale)).dimshuffle('x',0,'x','x'),
                                b = None,
                                nonlinearity = None,
                                name =  name+str(scale)))
    return ESL(nd+[n])
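
The core idea in MDCL is a single weight tensor shared across every branch, scaled per branch by learned coefficients. A stripped-down, self-contained sketch of that weight sharing between a plain and a dilated convolution is shown below; it omits the repo-specific aliases (initmethod, ESL) and the per-scale coefficients to stay short:

import theano
import theano.tensor as T
import lasagne

x = T.tensor4('x')
l_in = lasagne.layers.InputLayer((None, 3, 32, 32), x)

num_filters = 8
# One shared weight tensor parameterises both branches.
W = theano.shared(lasagne.init.Normal(0.02).sample((num_filters, 3, 3, 3)),
                  name='W_shared')

# Plain 3x3 branch.
base = lasagne.layers.Conv2DLayer(l_in, num_filters, (3, 3), pad=(1, 1),
                                  W=W, b=None, nonlinearity=None)
# Dilated branch: DilatedConv2DLayer expects weights in (in_channels, num_filters, h, w) layout,
# hence the dimshuffle, exactly as in MDCL above.
dil = lasagne.layers.DilatedConv2DLayer(
    lasagne.layers.PadLayer(l_in, width=(2, 2)),
    num_filters, (3, 3), dilation=(2, 2),
    W=W.dimshuffle(1, 0, 2, 3), b=None, nonlinearity=None)

out = lasagne.layers.ElemwiseSumLayer([base, dil])
f = theano.function([x], lasagne.layers.get_output(out))

Because both layers receive Theano expressions built from the same shared variable, gradients from both branches flow into the one weight tensor.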

# MDC-based Upsample Layer.
# This is a prototype I don't make use of extensively. It's operational but it doesn't seem to improve results yet.
Project: Neural-Photo-Editor    Author: ajbrock    | Project source | File source
def InceptionLayer(incoming,param_dict,block_name):
    branch = [0]*len(param_dict)
    # Loop across branches
    for i,dict in enumerate(param_dict):
        for j,style in enumerate(dict['style']): # Loop up branch
            branch[i] = C2D(
                incoming = branch[i] if j else incoming,
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                pad =  dict['pad'][j] if 'pad' in dict else None,
                stride = dict['stride'][j],
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j)) if style=='convolutional'\
            else NL(lasagne.layers.dnn.Pool2DDNNLayer(
                incoming=incoming if j == 0 else branch[i],
                pool_size = dict['filter_size'][j],
                mode = dict['mode'][j],
                stride = dict['stride'][j],
                pad = dict['pad'][j],
                name = block_name+'_'+str(i)+'_'+str(j)),
                nonlinearity = dict['nonlinearity'][j]) if style=='pool'\
            else lasagne.layers.DilatedConv2DLayer(
                incoming = lasagne.layers.PadLayer(incoming = incoming if j==0 else branch[i],width = dict['pad'][j]) if 'pad' in dict else incoming if j==0 else branch[i],
                num_filters = dict['num_filters'][j],
                filter_size = dict['filter_size'][j],
                dilation = dict['dilation'][j],
                # pad = dict['pad'][j] if 'pad' in dict else None,
                W = initmethod('relu'),
                nonlinearity = dict['nonlinearity'][j],
                name = block_name+'_'+str(i)+'_'+str(j))  if style== 'dilation'\
            else DL(
                    incoming = incoming if j==0 else branch[i],
                    num_units = dict['num_filters'][j],
                    W = initmethod('relu'),
                    b = None,
                    nonlinearity = dict['nonlinearity'][j],
                    name = block_name+'_'+str(i)+'_'+str(j))   
            # Apply Batchnorm
            branch[i] = BN(branch[i],name = block_name+'_bnorm_'+str(i)+'_'+str(j)) if dict['bnorm'][j] else branch[i]
    # Concatenate sublayers

    return CL(incomings=branch,name=block_name)
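
param_dict is a list with one dict per branch, holding parallel per-sub-layer lists. The configuration below is hypothetical, chosen only to be consistent with the keys the function reads; it is not one of the dictionaries used in Neural-Photo-Editor:

from lasagne.nonlinearities import rectify

param_dict = [
    # Branch 0: two stacked 3x3 convolutions, both batch-normed.
    {'style': ['convolutional', 'convolutional'],
     'num_filters': [32, 32],
     'filter_size': [(3, 3), (3, 3)],
     'pad': [(1, 1), (1, 1)],
     'stride': [(1, 1), (1, 1)],
     'nonlinearity': [rectify, rectify],
     'bnorm': [1, 1]},
    # Branch 1: 3x3 average pooling followed by a 1x1 convolution.
    {'style': ['pool', 'convolutional'],
     'num_filters': [0, 32],           # unused for the pooling sub-layer
     'filter_size': [(3, 3), (1, 1)],
     'pad': [(1, 1), (0, 0)],
     'stride': [(1, 1), (1, 1)],
     'mode': ['average_exc_pad', None],
     'nonlinearity': [None, rectify],
     'bnorm': [0, 1]},
]
# block = InceptionLayer(incoming, param_dict, block_name='inception_a')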

# Convenience function to define an inception-style block with upscaling