Python lasagne.nonlinearities module: sigmoid() example source code

From open-source Python projects, we extracted the following 50 code examples illustrating how to use lasagne.nonlinearities.sigmoid().
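Before the project examples, here is a minimal self-contained sketch (not taken from any project below) of the two ways sigmoid typically appears: called directly on a Theano expression, or passed as a layer's nonlinearity.

# Minimal usage sketch; assumes Theano and Lasagne are installed.
import theano
import theano.tensor as T
import lasagne
from lasagne.nonlinearities import sigmoid

x = T.matrix('x')
f = theano.function([x], sigmoid(x))    # elementwise logistic: 1 / (1 + exp(-x))

l_in = lasagne.layers.InputLayer(shape=(None, 100))
l_out = lasagne.layers.DenseLayer(l_in, num_units=1, nonlinearity=sigmoid)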

Project: dsb3    Author: EliasVansteenkiste    | project source | file source
def highway_conv3(incoming, nonlinearity=nn.nonlinearities.rectify, **kwargs):
    wh = nn.init.Orthogonal('relu')
    bh = nn.init.Constant(0.0)
    wt = nn.init.Orthogonal('relu')
    bt = nn.init.Constant(-2.)
    num_filters = incoming.output_shape[1]

    # H
    l_h = Conv2DDNNLayer(incoming, num_filters=num_filters,
                         filter_size=(3, 3), stride=(1, 1),
                         pad='same', W=wh, b=bh,
                         nonlinearity=nonlinearity)
    # T
    l_t = Conv2DDNNLayer(incoming, num_filters=num_filters,
                         filter_size=(3, 3), stride=(1, 1),
                         pad='same', W=wt, b=bt,
                         nonlinearity=T.nnet.sigmoid)

    return HighwayLayer(gate=l_t, input1=l_h, input2=incoming)
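The sigmoid-gated branch l_t acts as the transform gate T of the highway connection. A minimal sketch of the combination such a HighwayLayer is assumed to compute (illustrative names, not the project's API):

# hypothetical highway combination: y = t * h + (1 - t) * x,
# with t the sigmoid gate, h the transform branch, x the layer input
def highway_combine(h, t, x):
    return t * h + (1.0 - t) * x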
Project: NeuroNLP    Author: XuezheMax    | project source | file source
def get_output_for(self, input, **kwargs):
        # if the input has more than two dimensions, flatten it into a
        # batch of feature vectors.
        input_reshape = input.flatten(2) if input.ndim > 2 else input

        activation = T.dot(input_reshape, self.W_h)
        if self.b_h is not None:
            activation = activation + self.b_h.dimshuffle('x', 0)
            activation = self.nonlinearity(activation)

        transform = T.dot(input_reshape, self.W_t)
        if self.b_t is not None:
            transform = transform + self.b_t.dimshuffle('x', 0)
            transform = nonlinearities.sigmoid(transform)

        carry = 1.0 - transform

        output = activation * transform + input_reshape * carry
        # reshape output back to original input_shape
        if input.ndim > 2:
            output = T.reshape(output, input.shape)

        return output
Project: NeuroNLP    Author: XuezheMax    | project source | file source
def exe_rnn(use_embedd, length, num_units, position, binominal):
    batch_size = BATCH_SIZE

    input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
    target_var = T.ivector(name='targets')

    layer_input = lasagne.layers.InputLayer(shape=(None, length, 1), input_var=input_var, name='input')
    if use_embedd:
        layer_position = construct_position_input(batch_size, length, num_units)
        layer_input = lasagne.layers.concat([layer_input, layer_position], axis=2)

    layer_rnn = RecurrentLayer(layer_input, num_units, nonlinearity=nonlinearities.tanh, only_return_final=True,
                               W_in_to_hid=lasagne.init.GlorotUniform(), W_hid_to_hid=lasagne.init.GlorotUniform(),
                               b=lasagne.init.Constant(0.), name='RNN')
    # W = layer_rnn.W_hid_to_hid.sum()
    # U = layer_rnn.W_in_to_hid.sum()
    # b = layer_rnn.b.sum()

    layer_output = DenseLayer(layer_rnn, num_units=1, nonlinearity=nonlinearities.sigmoid, name='output')

    return train(layer_output, layer_rnn, input_var, target_var, batch_size, length, position, binominal)
Project: Theano-MPI    Author: uoguelph-mlrg    | project source | file source
def build_critic(input_var=None, verbose=False):
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    # input: (None, 3, 32, 32)
    layer = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    # two convolutions
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 256, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 512, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # # fully-connected layer
    # layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer (linear)
    layer = DenseLayer(layer, 1, nonlinearity=None)
    if verbose: print ("critic output:", layer.output_shape)
    return layer
Project: gogh-figure    Author: joelmoniz    | project source | file source
def setup_transform_net(self, input_var=None):
        transform_net = InputLayer(shape=self.shape, input_var=input_var)
        transform_net = style_conv_block(transform_net, self.num_styles, 32, 9, 1)
        transform_net = style_conv_block(transform_net, self.num_styles, 64, 3, 2)
        transform_net = style_conv_block(transform_net, self.num_styles, 128, 3, 2)
        for _ in range(5):
            transform_net = residual_block(transform_net, self.num_styles)
        transform_net = nn_upsample(transform_net, self.num_styles)
        transform_net = nn_upsample(transform_net, self.num_styles)

        if self.net_type == 0:
            transform_net = style_conv_block(transform_net, self.num_styles, 3, 9, 1, tanh)
            transform_net = ExpressionLayer(transform_net, lambda X: 150.*X, output_shape=None)
        elif self.net_type == 1:
            transform_net = style_conv_block(transform_net, self.num_styles, 3, 9, 1, sigmoid)

        self.network['transform_net'] = transform_net
Project: ConvolutionalAutoEncoder    Author: ToniCreswell    | project source | file source
def build_net(nz=10):
    # nz = size of latent code
    # N.B. using batch_norm applies bn before the non-linearity!
    F = 32
    enc = InputLayer(shape=(None, 1, 28, 28))
    enc = Conv2DLayer(incoming=enc, num_filters=F * 2, filter_size=5, stride=2, nonlinearity=lrelu(0.2), pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F * 4, filter_size=5, stride=2, nonlinearity=lrelu(0.2), pad=2)
    enc = Conv2DLayer(incoming=enc, num_filters=F * 4, filter_size=5, stride=1, nonlinearity=lrelu(0.2), pad=2)
    enc = reshape(incoming=enc, shape=(-1, F * 4 * 7 * 7))
    enc = DenseLayer(incoming=enc, num_units=nz, nonlinearity=sigmoid)
    # generator (decoder) network
    dec = InputLayer(shape=(None, nz))
    dec = DenseLayer(incoming=dec, num_units=F * 4 * 7 * 7)
    dec = reshape(incoming=dec, shape=(-1, F * 4, 7, 7))
    dec = Deconv2DLayer(incoming=dec, num_filters=F * 4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=F * 4, filter_size=4, stride=2, nonlinearity=relu, crop=1)
    dec = Deconv2DLayer(incoming=dec, num_filters=1, filter_size=3, stride=1, nonlinearity=sigmoid, crop=1)

    return enc, dec
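Because build_net returns the encoder and decoder as separate stacks, a reconstruction pass has to feed the encoder output into the decoder's InputLayer. A minimal wiring sketch, assuming the same imports as the snippet above:

import theano.tensor as T
import lasagne

enc, dec = build_net(nz=10)
X = T.tensor4('X')                          # (batch, 1, 28, 28)
z = lasagne.layers.get_output(enc, X)       # latent code in (0, 1) from the sigmoid bottleneck
X_hat = lasagne.layers.get_output(dec, z)   # feeds z into the decoder's InputLayer; output also in (0, 1)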
Project: StockPredictor    Author: wallsbreaker    | project source | file source
def get_model_by_strategy(input_var, output_nodes=1, dnn_strategy='mix'):
    features_type = len(config.cols_dimension)
    period = config.before
    features_dim = features_type * period

    # build the network for the requested strategy (each branch must assign `network`)
    if dnn_strategy == 'mix':
        network = build_mix(input_var, output_nodes, features_type, features_dim, period, activity=sigmoid)
    elif dnn_strategy == 'dnn':
        network = build_dnn(input_var, output_nodes, features_type, features_dim, period, activity=sigmoid)
    elif dnn_strategy == 'conv1d':
        network = build_conv1d(input_var, output_nodes, features_type, features_dim, period, activity=sigmoid)
    elif dnn_strategy == 'cascade':
        network = build_cascade(input_var, output_nodes, features_type, features_dim, period, activity=sigmoid)
    elif dnn_strategy == 'lstm':
        network = build_lstm(input_var, output_nodes, features_type, features_dim, period, activity=sigmoid)
    elif dnn_strategy == 'partitioned':
        network = build_partitioned(input_var, output_nodes, features_type, features_dim, period, activity=sigmoid)
    else:
        raise AttributeError("This dnn_strategy is not supported!")

    return network
Project: ip-avsr    Author: lzuwei    | project source | file source
def extract_encoder(dbn):
    dbn_layers = dbn.get_all_layers()
    encoder = NeuralNet(
        layers=[
            (InputLayer, {'name': 'input', 'shape': dbn_layers[0].shape}),
            (DenseLayer, {'name': 'l1', 'num_units': dbn_layers[1].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[1].W, 'b': dbn_layers[1].b}),
            (DenseLayer, {'name': 'l2', 'num_units': dbn_layers[2].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[2].W, 'b': dbn_layers[2].b}),
            (DenseLayer, {'name': 'l3', 'num_units': dbn_layers[3].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[3].W, 'b': dbn_layers[3].b}),
            (DenseLayer, {'name': 'l4', 'num_units': dbn_layers[4].num_units, 'nonlinearity': linear,
                          'W': dbn_layers[4].W, 'b': dbn_layers[4].b}),
        ],
        update=adadelta,
        update_learning_rate=0.01,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    encoder.initialize()
    return encoder
Project: ip-avsr    Author: lzuwei    | project source | file source
def extract_encoder(dbn):
    dbn_layers = dbn.get_all_layers()
    encoder = NeuralNet(
        layers=[
            (InputLayer, {'name': 'input', 'shape': dbn_layers[0].shape}),
            (DenseLayer, {'name': 'l1', 'num_units': dbn_layers[1].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[1].W, 'b': dbn_layers[1].b}),
            (DenseLayer, {'name': 'l2', 'num_units': dbn_layers[2].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[2].W, 'b': dbn_layers[2].b}),
            (DenseLayer, {'name': 'l3', 'num_units': dbn_layers[3].num_units, 'nonlinearity': sigmoid,
                          'W': dbn_layers[3].W, 'b': dbn_layers[3].b}),
            (DenseLayer, {'name': 'l4', 'num_units': dbn_layers[4].num_units, 'nonlinearity': linear,
                          'W': dbn_layers[4].W, 'b': dbn_layers[4].b}),
        ],
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    encoder.initialize()
    return encoder
Project: ip-avsr    Author: lzuwei    | project source | file source
def compile_encoder(encoderpath=None):
    # create input
    if encoderpath:
        l_encoder = pickle.load(open(encoderpath, 'rb'))
        input_var = las.layers.get_all_layers(l_encoder)[0].input_var
        visualize_layer(las.layers.get_all_layers(l_encoder)[2], 40, 30)
    else:
        input_var = T.matrix('input', dtype='float32')
        weights, biases = autoencoder.load_dbn()
        en_activations = [sigmoid, sigmoid, sigmoid, linear]
        en_layersizes = [2000, 1000, 500, 50]
        l_input = InputLayer((None, 1200), input_var, name='input')
        l_encoder = autoencoder.create_model(l_input, weights[:4], biases[:4], en_activations, en_layersizes)
    print_network(l_encoder)

    encoded_features = las.layers.get_output(l_encoder)
    encode_fn = theano.function([input_var], encoded_features, allow_input_downcast=True)
    return encode_fn
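The returned function maps raw 1200-dimensional inputs to the 50-dimensional bottleneck. A hypothetical call (path and array names are illustrative):

encode_fn = compile_encoder('models/encoder.pkl')   # or compile_encoder() to build from the DBN weights
features = encode_fn(X_batch)                       # (batch, 1200) float32 -> (batch, 50)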
Project: ip-avsr    Author: lzuwei    | project source | file source
def build_encoder_layers(input_size, encode_size, sigma=0.5):
    """
    builds an autoencoder with gaussian noise layer
    :param input_size: input size
    :param encode_size: encoded size
    :param sigma: gaussian noise standard deviation
    :return: Weights of encoder layer, denoising autoencoder layer
    """
    W = theano.shared(GlorotUniform().sample(shape=(input_size, encode_size)))

    layers = [
        (InputLayer, {'shape': (None, input_size)}),
        (GaussianNoiseLayer, {'name': 'corrupt', 'sigma': sigma}),
        (DenseLayer, {'name': 'encoder', 'num_units': encode_size, 'nonlinearity': sigmoid, 'W': W}),
        (DenseLayer, {'name': 'decoder', 'num_units': input_size, 'nonlinearity': linear, 'W': W.T}),  # tied weights
    ]
    return W, layers
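A hypothetical usage sketch, following the NeuralNet conventions used elsewhere on this page: the layer list plugs straight into nolearn's NeuralNet, and the shared W / W.T pair keeps encoder and decoder weights tied.

from nolearn.lasagne import NeuralNet
from lasagne.updates import nesterov_momentum

W, layers = build_encoder_layers(input_size=1200, encode_size=50, sigma=0.5)
dae = NeuralNet(
    layers=layers,
    update=nesterov_momentum,
    update_learning_rate=0.001,
    update_momentum=0.5,
    verbose=1,
    regression=True,
)
dae.initialize()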
Project: time_lstm    Author: DarryO    | project source | file source
def __init__(self, W_in=init.Normal(0.1), W_hid=init.Normal(0.1),
                 W_cell=init.Normal(0.1), W_to=init.Normal(0.1),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.sigmoid):
        self.W_in = W_in
        self.W_hid = W_hid
        self.W_to = W_to
        # Don't store a cell weight vector when cell is None
        if W_cell is not None:
            self.W_cell = W_cell
        self.b = b
        # For the nonlinearity, if None is supplied, use identity
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
Project: crayimage    Author: yandexdataschool    | project source | file source
def define(self, n_units=1):
    self.sample_weights = T.fvector(name='weights')
    self.labels = T.fvector(name='labels')
    self.input = T.fmatrix(name='input')

    input_layer = layers.InputLayer(shape=(None, 1), input_var=self.input)

    dense1 = layers.DenseLayer(
      input_layer,
      num_units=n_units,
      nonlinearity=nonlinearities.sigmoid
    )

    self.net = layers.DenseLayer(
      dense1,
      num_units=1,
      nonlinearity=nonlinearities.sigmoid
    )
Project: crayimage    Author: yandexdataschool    | project source | file source
def __init__(self, incomings, num_units, nonlinearity=nonlinearities.sigmoid,
             W=init.Uniform(), b=init.Constant(0.0), **kwargs):
    super(MergeDense, self).__init__(incomings=incomings, **kwargs)

    self.num_units = num_units

    self.input_shapes = [inc.output_shape for inc in incomings]

    self.weights = [
      self.get_weights(W, shape=input_shape, name='W%d' % i)
      for i, input_shape in enumerate(self.input_shapes)
    ]

    self.b = self.add_param(b, (self.num_units,), name="b", regularizable=False)

    self.nonlinearity = nonlinearity
Project: dsb3    Author: EliasVansteenkiste    | project source | file source
def get_output_for(self, input, **kwargs):
        # apply the sigmoid only when the layer's `apply_nl` flag is set (assumed
        # attribute); otherwise `ps` would be undefined when the sigmoid is skipped
        ps = nonlinearities.sigmoid(input) if self.apply_nl else input
        prod = T.prod(ps, axis=(1, 2))
        output = 1 - prod
        return output
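Assuming the sigmoid outputs in ps are independent per-location probabilities, T.prod gives the probability that the event holds everywhere, so 1 - prod is the probability it fails at least once (a noisy-OR over the complementary events). A quick NumPy check of the arithmetic:

import numpy as np

ps = np.array([0.9, 0.8, 0.95])   # independent per-location probabilities
p_all = np.prod(ps)                # P(event holds everywhere) ~ 0.684
p_not_all = 1.0 - p_all            # P(it fails somewhere) ~ 0.316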
Project: dsb3    Author: EliasVansteenkiste    | project source | file source
def get_output_for(self, input, **kwargs):
        ps = nonlinearities.sigmoid(input)
        powd = ps ** self.exp
        tmean = T.mean(powd, axis=(1,2))
        return tmean
Project: dsb3    Author: EliasVansteenkiste    | project source | file source
def get_output_for(self, input, **kwargs):

        ps = nonlinearities.sigmoid(input)
        sum_p_r_benign = T.sum(ps,axis=1)
        sum_log = T.sum(T.log(1-ps+1.e-12),axis=1)
        return T.concatenate([sum_log, sum_p_r_benign])
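Here T.sum(T.log(1 - ps + 1.e-12), axis=1) is the log-probability that every unit is negative (the 1e-12 keeps the log finite when a sigmoid saturates at 1), concatenated with the sum of the per-unit probabilities. The same quantity in NumPy, for illustration:

import numpy as np

ps = np.array([[0.9, 0.1, 0.2]])
log_p_all_neg = np.sum(np.log(1.0 - ps + 1e-12), axis=1)   # log prod_i (1 - p_i)
sum_p = np.sum(ps, axis=1)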
Project: third_person_im    Author: bstadie    | project source | file source
def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
Project: NeuroNLP    Author: XuezheMax    | project source | file source
def exe_maxru(length, num_units, position, binominal):
    batch_size = BATCH_SIZE

    input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
    target_var = T.ivector(name='targets')

    layer_input = lasagne.layers.InputLayer(shape=(None, length, 1), input_var=input_var, name='input')

    time_updategate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None)

    time_update = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None,
                       b=lasagne.init.Constant(0.), nonlinearity=nonlinearities.tanh)

    resetgate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                     W_cell=lasagne.init.GlorotUniform())

    updategate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                      W_cell=lasagne.init.GlorotUniform())

    hiden_update = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None,
                        b=lasagne.init.Constant(0.), nonlinearity=nonlinearities.tanh)

    layer_taru = MAXRULayer(layer_input, num_units, max_length=length,
                            P_time=lasagne.init.GlorotUniform(), nonlinearity=nonlinearities.tanh,
                            resetgate=resetgate, updategate=updategate, hidden_update=hiden_update,
                            time_updategate=time_updategate, time_update=time_update,
                            only_return_final=True, name='MAXRU', p=0.)

    # W = layer_taru.W_hid_to_hidden_update.sum()
    # U = layer_taru.W_in_to_hidden_update.sum()
    # b = layer_taru.b_hidden_update.sum()

    layer_output = DenseLayer(layer_taru, num_units=1, nonlinearity=nonlinearities.sigmoid, name='output')

    return train(layer_output, input_var, target_var, batch_size, length, position, binominal)
Project: NeuroNLP    Author: XuezheMax    | project source | file source
def exe_lstm(use_embedd, length, num_units, position, binominal):
    batch_size = BATCH_SIZE

    input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
    target_var = T.ivector(name='targets')

    layer_input = lasagne.layers.InputLayer(shape=(None, length, 1), input_var=input_var, name='input')
    if use_embedd:
        layer_position = construct_position_input(batch_size, length, num_units)
        layer_input = lasagne.layers.concat([layer_input, layer_position], axis=2)

    ingate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                  W_cell=lasagne.init.Uniform(range=0.1))

    outgate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                   W_cell=lasagne.init.Uniform(range=0.1))
    # following Jozefowicz et al. (2015), initialize the forget-gate bias to 1
    forgetgate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                      W_cell=lasagne.init.Uniform(range=0.1), b=lasagne.init.Constant(1.))
    # tanh is used as the cell nonlinearity; a purely linear cell is still untried
    cell = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None,
                b=lasagne.init.Constant(0.), nonlinearity=nonlinearities.tanh)

    layer_lstm = LSTMLayer(layer_input, num_units, ingate=ingate, forgetgate=forgetgate, cell=cell, outgate=outgate,
                           peepholes=False, nonlinearity=nonlinearities.tanh, only_return_final=True, name='LSTM')

    # W = layer_lstm.W_hid_to_cell.sum()
    # U = layer_lstm.W_in_to_cell.sum()
    # b = layer_lstm.b_cell.sum()

    layer_output = DenseLayer(layer_lstm, num_units=1, nonlinearity=nonlinearities.sigmoid, name='output')

    return train(layer_output, layer_lstm, input_var, target_var, batch_size, length, position, binominal)
Project: NeuroNLP    Author: XuezheMax    | project source | file source
def exe_gru(use_embedd, length, num_units, position, binominal, reset_input):
    batch_size = BATCH_SIZE

    input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
    target_var = T.ivector(name='targets')

    layer_input = lasagne.layers.InputLayer(shape=(batch_size, length, 1), input_var=input_var, name='input')
    if use_embedd:
        layer_position = construct_position_input(batch_size, length, num_units)
        layer_input = lasagne.layers.concat([layer_input, layer_position], axis=2)

    resetgate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None)

    updategate = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None)

    hiden_update = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None,
                        b=lasagne.init.Constant(0.), nonlinearity=nonlinearities.tanh)

    layer_gru = GRULayer_ANA(layer_input, num_units, resetgate=resetgate, updategate=updategate, hidden_update=hiden_update,
                         reset_input=reset_input, only_return_final=True, name='GRU')

    # W = layer_gru.W_hid_to_hidden_update.sum()
    # U = layer_gru.W_in_to_hidden_update.sum()
    # b = layer_gru.b_hidden_update.sum()

    layer_output = DenseLayer(layer_gru, num_units=1, nonlinearity=nonlinearities.sigmoid, name='output')

    return train(layer_output, layer_gru, input_var, target_var, batch_size, length, position, binominal)
Project: BirdCLEF2017    Author: kahst    | project source | file source
def calc_loss_multi(prediction, targets):

    # we need to clip predictions when computing the log-loss
    prediction = T.clip(prediction, 0.0000001, 0.9999999)

    # binary cross-entropy is the appropriate loss for a multi-label sigmoid output
    loss = T.mean(objectives.binary_crossentropy(prediction, targets))

    return loss
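The clip protects the log terms inside the cross-entropy: at a prediction of exactly 0 or 1, log(p) or log(1 - p) is -inf, so the loss and its gradient blow up. A quick NumPy illustration:

import numpy as np

p = np.array([0.0, 0.5, 1.0])
p_clipped = np.clip(p, 1e-7, 1.0 - 1e-7)
print(-np.log(p))          # [inf 0.693... -0.]  -- unbounded at p = 0
print(-np.log(p_clipped))  # finite everywhere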

Project: EAC-Net    Author: wiibrew    | project source | file source
def build_tempral_model():
    net = {}
    net['input'] = InputLayer((None, 24, 2048))
    net['lstm1'] = LSTMLayer(net['input'], 256)
    net['fc'] = DenseLayer(net['lstm1'], num_units=12, nonlinearity=sigmoid)

    return net
Project: EAC-Net    Author: wiibrew    | project source | file source
def build_model():
    net = {}
    net['input'] = InputLayer((None, 512 * 20, 3, 3))

    au_fc_layers = []
    for i in range(20):
        net['roi_AU_N_' + str(i)] = SliceLayer(net['input'], indices=slice(i * 512, (i + 1) * 512), axis=1)

        # try adding upsampling here for more conv layers
        net['Roi_upsample_' + str(i)] = Upscale2DLayer(net['roi_AU_N_' + str(i)], scale_factor=2)

        net['conv_roi_' + str(i)] = ConvLayer(net['Roi_upsample_' + str(i)], 512, 3)

        net['au_fc_' + str(i)] = DenseLayer(net['conv_roi_' + str(i)], num_units=150)

        au_fc_layers += [net['au_fc_' + str(i)]]

    net['local_fc'] = concat(au_fc_layers)
    net['local_fc2'] = DenseLayer(net['local_fc'], num_units=2048)
    net['local_fc_dp'] = DropoutLayer(net['local_fc2'], p=0.5)

    # net['fc_comb'] = concat([net['au_fc_layer'], net['local_fc_dp']])
    # net['fc_dense'] = DenseLayer(net['fc_comb'], num_units=1024)
    # net['fc_dense_dp'] = DropoutLayer(net['fc_dense'], p=0.3)

    net['real_out'] = DenseLayer(net['local_fc_dp'], num_units=12, nonlinearity=sigmoid)

    # net['final'] = concat([net['pred_pos_layer'], net['output_layer']])

    return net
Project: Theano-MPI    Author: uoguelph-mlrg    | project source | file source
def build_generator(input_var=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 128*7*7))
    layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    # two fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, crop='same',
                                     output_size=14))
    layer = Deconv2DLayer(layer, 1, 5, stride=2, crop='same', output_size=28,
                          nonlinearity=sigmoid)
    print ("Generator output:", layer.output_shape)
    return layer
Project: Theano-MPI    Author: uoguelph-mlrg    | project source | file source
def build_generator(input_var=None, verbose=False):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # # fully-connected layer
    # layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 1024*4*4))
    layer = ReshapeLayer(layer, ([0], 1024, 4, 4))
    # two fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 512, 5, stride=2, crop='same',
                                     output_size=8))
    layer = batch_norm(Deconv2DLayer(layer, 256, 5, stride=2, crop='same',
                                     output_size=16))
    layer = Deconv2DLayer(layer, 3, 5, stride=2, crop='same', output_size=32,
                          nonlinearity=sigmoid)
    if verbose: print ("Generator output:", layer.output_shape)
    return layer
Project: MEM_DGM    Author: thu-ml    | project source | file source
def get_output_for(self, input, **kwargs):
        assert input.ndim == 2
        activation = T.dot(input, self.C)
        if self.b is not None:
            activation = activation + self.b.dimshuffle('x', 0)
        return self.nonlinearity_final(nonlinearities.sigmoid(activation).dot(self.M))
Project: MEM_DGM    Author: thu-ml    | project source | file source
def __init__(self, u_net, z_net,
                 nonlinearity=nonlinearities.sigmoid,
                 nonlinearity_final=nonlinearities.identity, **kwargs):
        super(LadderCompositionLayer, self).__init__([u_net, z_net], **kwargs)

        u_shp, z_shp = self.input_shapes


        if not u_shp[-1] == z_shp[-1]:
            raise ValueError("last dimension of u and z must be equal:"
                             " u was %s, z was %s" % (str(u_shp), str(z_shp)))
        self.num_inputs = z_shp[-1]
        self.nonlinearity = nonlinearity
        self.nonlinearity_final = nonlinearity_final
        constant = init.Constant
        self.a1 = self.add_param(constant(0.), (self.num_inputs,), name="a1")
        self.a2 = self.add_param(constant(1.), (self.num_inputs,), name="a2")
        self.a3 = self.add_param(constant(0.), (self.num_inputs,), name="a3")
        self.a4 = self.add_param(constant(0.), (self.num_inputs,), name="a4")

        self.c1 = self.add_param(constant(0.), (self.num_inputs,), name="c1")
        self.c2 = self.add_param(constant(1.), (self.num_inputs,), name="c2")
        self.c3 = self.add_param(constant(0.), (self.num_inputs,), name="c3")

        self.c4 = self.add_param(constant(0.), (self.num_inputs,), name="c4")

        self.b1 = self.add_param(constant(0.), (self.num_inputs,),
                                 name="b1", regularizable=False)
Project: MEM_DGM    Author: thu-ml    | project source | file source
def get_output_for(self, input, **kwargs):
        activation = T.dot(input, self.C)
        if self.b is not None:
            activation = activation + self.b.dimshuffle('x', 0)
        return nonlinearities.sigmoid(activation)
Project: DynamicMemoryNetworks    Author: swstarlab    | project source | file source
def __init__(self, W_in=Normal(0.1), W_hid=Normal(0.1),
                 b=Constant(0.), nonlinearity=nonlin.sigmoid):
        self.W_in  = W_in
        self.W_hid = W_hid
        self.b     = b
        if nonlinearity is None:
            self.nonlinearity = nonlin.identity
        else:
            self.nonlinearity = nonlinearity
Project: AttentionNet    Author: sayvazov    | project source | file source
def get_output_for(self, arguments, **kwargs):
        input, hprev, Cprev = arguments
        # LSTM gate equations; note the elementwise `*` here assumes suitably
        # shaped operands (a conventional LSTM uses T.dot for the projections)
        i = nl.sigmoid(self.Wi * input + self.Ui * hprev + self.bi)
        cand = nl.tanh(self.Wc * input + self.Uc * hprev + self.bc)
        f = nl.sigmoid(self.Wf * input + self.Uf * hprev + self.bf)
        C = i * cand + f * Cprev
        o = nl.sigmoid(self.Wo * input + self.Uo * hprev + self.Vo * C + self.bo)
        h = o * nl.tanh(C)
        return h, C
Project: StockPredictor    Author: wallsbreaker    | project source | file source
def build_combination(input_var, output_nodes, input_size, stocks, period, feature_types):
    # Input layer
    input_layer = InputLayer(shape=(None, 1, input_size), input_var=input_var)
    assert input_size == stocks * period * feature_types
    input_layer = ReshapeLayer(input_layer, ([0], stocks, period, feature_types))

    # slice the input into per-stock, per-feature-type partitions
    stock_feature_type_layers = []
    for ix in range(stocks):
        stock_layer = SliceLayer(input_layer, indices=ix, axis=1)
        this_stock_feature_type_layers = []
        for rx in range(feature_types):
            this_stock_feature_type_layers.append(SliceLayer(stock_layer, indices=rx, axis=1))
        stock_feature_type_layers.append(this_stock_feature_type_layers)

    stock_networks = []
    for this_stock_feature_type_layers in stock_feature_type_layers:
        this_stock_networks = []
        for feature_type_layer in this_stock_feature_type_layers:
            tmp = DenseLayer(dropout(feature_type_layer, p=.2),
                num_units=10, nonlinearity=tanh)
            tmp = DenseLayer(dropout(tmp, p=.5), num_units=1, nonlinearity=tanh)
            this_stock_networks.append(tmp)

        this_stock_network = ConcatLayer(this_stock_networks)

        stock_network = DenseLayer(dropout(this_stock_network, p=.5),
                num_units=1, nonlinearity=tanh)

        stock_networks.append(stock_network)

    network = ConcatLayer(stock_networks)
    network = DenseLayer(dropout(network, p=.5),
                num_units=output_nodes, nonlinearity=sigmoid)

    return network, stock_networks
Project: ip-avsr    Author: lzuwei    | project source | file source
def create_pretrained_encoder(weights, biases, incoming):
    l_1 = DenseLayer(incoming, 2000, W=weights[0], b=biases[0], nonlinearity=sigmoid, name='fc1')
    l_2 = DenseLayer(l_1, 1000, W=weights[1], b=biases[1], nonlinearity=sigmoid, name='fc2')
    l_3 = DenseLayer(l_2, 500, W=weights[2], b=biases[2], nonlinearity=sigmoid, name='fc3')
    l_4 = DenseLayer(l_3, 50, W=weights[3], b=biases[3], nonlinearity=linear, name='encoder')
    return l_4
Project: ip-avsr    Author: lzuwei    | project source | file source
def load_finetuned_dbn(path):
    """
    Load a fine-tuned Deep Belief Net from file
    :param path: path to deep belief net parameters
    :return: deep belief net
    """
    dbn = NeuralNet(
        layers=[
            ('input', las.layers.InputLayer),
            ('l1', las.layers.DenseLayer),
            ('l2', las.layers.DenseLayer),
            ('l3', las.layers.DenseLayer),
            ('l4', las.layers.DenseLayer),
            ('l5', las.layers.DenseLayer),
            ('l6', las.layers.DenseLayer),
            ('l7', las.layers.DenseLayer),
            ('output', las.layers.DenseLayer)
        ],
        input_shape=(None, 1200),
        l1_num_units=2000, l1_nonlinearity=sigmoid,
        l2_num_units=1000, l2_nonlinearity=sigmoid,
        l3_num_units=500, l3_nonlinearity=sigmoid,
        l4_num_units=50, l4_nonlinearity=linear,
        l5_num_units=500, l5_nonlinearity=sigmoid,
        l6_num_units=1000, l6_nonlinearity=sigmoid,
        l7_num_units=2000, l7_nonlinearity=sigmoid,
        output_num_units=1200, output_nonlinearity=linear,
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    with open(path, 'rb') as f:
        pretrained_nn = pickle.load(f)
    if pretrained_nn is not None:
        dbn.load_params_from(path)
    return dbn
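A hypothetical call (path illustrative): the layer sizes unroll a 1200-2000-1000-500-50-500-1000-2000-1200 autoencoder, so predict() reconstructs its input.

dbn = load_finetuned_dbn('models/finetuned_dbn.pkl')
X_rec = dbn.predict(X)   # (batch, 1200) -> (batch, 1200) reconstruction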
Project: ip-avsr    Author: lzuwei    | project source | file source
def load_encoder(path):
    """
        load a pretrained dbn from path
        :param path: path to the .mat dbn
        :return: pretrained unrolled encoder
        """
    # create the network using weights from pretrain_nn.mat
    nn = sio.loadmat(path)
    w1 = nn['w1']
    w2 = nn['w2']
    w3 = nn['w3']
    w4 = nn['w4']
    b1 = nn['b1'][0]
    b2 = nn['b2'][0]
    b3 = nn['b3'][0]
    b4 = nn['b4'][0]

    encoder = NeuralNet(
        layers=[
            (InputLayer, {'name': 'input', 'shape': (None, 1200)}),
            (DenseLayer, {'name': 'l1', 'num_units': 2000, 'nonlinearity': sigmoid,
                          'W': w1, 'b': b1}),
            (DenseLayer, {'name': 'l2', 'num_units': 1000, 'nonlinearity': sigmoid,
                          'W': w2, 'b': b2}),
            (DenseLayer, {'name': 'l3', 'num_units': 500, 'nonlinearity': sigmoid,
                          'W': w3, 'b': b3}),
            (DenseLayer, {'name': 'l4', 'num_units': 50, 'nonlinearity': linear,
                          'W': w4, 'b': b4}),
        ],
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.5,
        objective_l2=0.005,
        verbose=1,
        regression=True
    )
    encoder.initialize()
    return encoder