Python lasagne.layers module: SliceLayer() usage examples

The following 14 code examples, extracted from open-source Python projects, illustrate how to use lasagne.layers.SliceLayer().
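
SliceLayer(incoming, indices, axis=-1) selects along a single axis of its input: an integer indices drops the sliced axis from the output, while a slice object keeps it. A minimal sketch of both behaviours (shapes are illustrative):

from lasagne.layers import InputLayer, SliceLayer, get_output_shape

l_in = InputLayer((None, 10, 20))
# An integer index removes the sliced axis: (None, 10, 20) -> (None, 20)
l_last = SliceLayer(l_in, indices=-1, axis=1)
# A slice object keeps the axis: (None, 10, 20) -> (None, 5, 20)
l_head = SliceLayer(l_in, indices=slice(0, 5), axis=1)
print(get_output_shape(l_last))  # (None, 20)
print(get_output_shape(l_head))  # (None, 5, 20)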

Project: deep_learning    Author: Vict0rSch    | project source | file source
def build_model(input_var=None):

    layers = [1, 5, 10, 1]

    l_in = InputLayer((None, None, layers[0]),
                      input_var=input_var)

    l_lstm1 = LSTMLayer(l_in, layers[1])
    l_lstm1_dropout = DropoutLayer(l_lstm1, p=0.2)

    l_lstm2 = LSTMLayer(l_lstm1_dropout, layers[2])
    l_lstm2_dropout = DropoutLayer(l_lstm2, p=0.2)

    # The objective of this task depends only on the final value produced by
    # the network, so we use a SliceLayer to extract the LSTM's output at the
    # last time step, i.e. the last index along the second (sequence length)
    # dimension.
    l_slice = SliceLayer(l_lstm2_dropout, -1, 1)

    l_out = DenseLayer(l_slice, 1, nonlinearity=lasagne.nonlinearities.linear)

    return l_out
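
A minimal sketch of compiling and querying this model (the variable names and shapes below are illustrative, and the snippet's own imports are assumed):

import numpy as np
import theano
import theano.tensor as T
from lasagne.layers import get_output

input_var = T.tensor3('inputs')  # (batch, seq_len, features)
network = build_model(input_var)
# deterministic=True disables the dropout layers at prediction time
prediction = get_output(network, deterministic=True)
predict_fn = theano.function([input_var], prediction)

x = np.random.randn(4, 16, 1).astype(theano.config.floatX)
print(predict_fn(x).shape)  # (4, 1): one value per sequence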
Project: twitter_caption    Author: tencia    | project source | file source
def build_rnn(conv_input_var, seq_input_var, conv_shape, word_dims, n_hid, lstm_layers):
    ret = {}
    ret['seq_input'] = seq_layer = InputLayer((None, None, word_dims), input_var=seq_input_var)
    batchsize, seqlen, _ = seq_layer.input_var.shape
    ret['seq_resh'] = seq_layer = ReshapeLayer(seq_layer, shape=(-1, word_dims))
    ret['seq_proj'] = seq_layer = DenseLayer(seq_layer, num_units=n_hid)
    ret['seq_resh2'] = seq_layer = ReshapeLayer(seq_layer, shape=(batchsize, seqlen, n_hid))
    ret['conv_input'] = conv_layer = InputLayer(conv_shape, input_var = conv_input_var)
    ret['conv_proj'] = conv_layer = DenseLayer(conv_layer, num_units=n_hid)
    ret['conv_resh'] = conv_layer = ReshapeLayer(conv_layer, shape=([0], 1, -1))
    ret['input_concat'] = layer = ConcatLayer([conv_layer, seq_layer], axis=1)
    for lstm_layer_idx in xrange(lstm_layers):
        ret['lstm_{}'.format(lstm_layer_idx)] = layer = LSTMLayer(layer, n_hid)
    ret['out_resh'] = layer = ReshapeLayer(layer, shape=(-1, n_hid))
    ret['output_proj'] = layer = DenseLayer(layer, num_units=word_dims, nonlinearity=log_softmax)
    ret['output'] = layer = ReshapeLayer(layer, shape=(batchsize, seqlen+1, word_dims))
    ret['output'] = layer = SliceLayer(layer, indices=slice(None, -1), axis=1)
    return ret

# originally from
# https://github.com/Lasagne/Recipes/blob/master/examples/styletransfer/Art%20Style%20Transfer.ipynb
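
The closing SliceLayer in build_rnn above trims the final time step: slice(None, -1) on axis 1 keeps everything but the last index, so the seqlen+1 outputs (the extra step comes from prepending the image projection) line up with seqlen word targets. A standalone shape sketch with hypothetical sizes:

from lasagne.layers import InputLayer, SliceLayer, get_output_shape

l = InputLayer((8, 11, 300))  # (batch, seqlen + 1, word_dims)
l = SliceLayer(l, indices=slice(None, -1), axis=1)
print(get_output_shape(l))  # (8, 10, 300)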
Project: nn-patterns    Author: pikinder    | project source | file source
def _invert_PadLayer(self, layer, feeder):
        assert isinstance(layer, L.PadLayer)
        assert layer.batch_ndim == 2
        assert len(L.get_output_shape(layer)) == 4

        tmp = L.SliceLayer(feeder,
                           slice(layer.width[0][0], -layer.width[0][1]),
                           axis=2)
        return L.SliceLayer(tmp,
                            slice(layer.width[1][0], -layer.width[1][1]),
                            axis=3)
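
Inverting a PadLayer amounts to cropping: slice width[i][0] off the front and width[i][1] off the back of each spatial axis. A self-contained sketch of the same idea (sizes are illustrative):

import lasagne.layers as L

l_in = L.InputLayer((None, 3, 32, 32))
l_pad = L.PadLayer(l_in, width=[(2, 2), (3, 3)])     # -> (None, 3, 36, 38)
l_crop = L.SliceLayer(l_pad, slice(2, -2), axis=2)   # undo the height padding
l_crop = L.SliceLayer(l_crop, slice(3, -3), axis=3)  # undo the width padding
print(L.get_output_shape(l_crop))                    # (None, 3, 32, 32)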
Project: nn-patterns    Author: pikinder    | project source | file source
def _invert_layer(self, layer, feeder):
        layer_type = type(layer)

        if L.get_output_shape(feeder) != L.get_output_shape(layer):
            feeder = L.ReshapeLayer(feeder, (-1,)+L.get_output_shape(layer)[1:])
        if layer_type is L.InputLayer:
            return self._invert_InputLayer(layer, feeder)
        elif layer_type is L.FlattenLayer:
            return self._invert_FlattenLayer(layer, feeder)
        elif layer_type is L.DenseLayer:
            return self._invert_DenseLayer(layer, feeder)
        elif layer_type is L.Conv2DLayer:
            return self._invert_Conv2DLayer(layer, feeder)
        elif layer_type is L.DropoutLayer:
            return self._invert_DropoutLayer(layer, feeder)
        elif layer_type in [L.MaxPool2DLayer, L.MaxPool1DLayer]:
            return self._invert_MaxPoolingLayer(layer, feeder)
        elif layer_type is L.PadLayer:
            return self._invert_PadLayer(layer, feeder)
        elif layer_type is L.SliceLayer:
            return self._invert_SliceLayer(layer, feeder)
        elif layer_type is L.LocalResponseNormalization2DLayer:
            return self._invert_LocalResponseNormalisation2DLayer(layer, feeder)
        elif layer_type is L.GlobalPoolLayer:
            return self._invert_GlobalPoolLayer(layer, feeder)
        else:
            return self._invert_UnknownLayer(layer, feeder)
Project: Deopen    Author: kimmo1019    | project source | file source
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network
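
The three SliceLayers split the single (1, 4, l+1024) input plane in two: the first l columns hold the one-hot sequence fed to the convolutional stack, and the remaining 1024 columns (rows 0-3) are flattened into a dense branch. A hedged usage sketch (the variable names below are illustrative):

import numpy as np
import theano
import theano.tensor as T
from lasagne.layers import get_output

network = create_network()
input_var = T.tensor4('X')
probs = get_output(network, input_var, deterministic=True)
predict_fn = theano.function([input_var], probs)

X = np.random.rand(2, 1, 4, 1000 + 1024).astype(theano.config.floatX)
print(predict_fn(X).shape)  # (2, 2): class probabilities per sample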


#random search to initialize the weights
Project: Deopen    Author: kimmo1019    | project source | file source
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    # layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer15, num_units=256)
    network = DenseLayer(layer17, num_units=1, nonlinearity=None)
    return network


#random search to initialize the weights
Project: EAC-Net    Author: wiibrew    | project source | file source
def build_model():
    net = {}
    net['input'] = InputLayer((None, 512*20, 3, 3))

    au_fc_layers = []
    for i in range(20):
        net['roi_AU_N_' + str(i)] = SliceLayer(net['input'], indices=slice(i * 512, (i + 1) * 512), axis=1)

        # try adding upsampling here for more convolutions

        net['Roi_upsample_' + str(i)] = Upscale2DLayer(net['roi_AU_N_' + str(i)], scale_factor=2)

        net['conv_roi_' + str(i)] = ConvLayer(net['Roi_upsample_' + str(i)], 512, 3)

        net['au_fc_' + str(i)] = DenseLayer(net['conv_roi_' + str(i)], num_units=150)

        au_fc_layers += [net['au_fc_' + str(i)]]

    net['local_fc'] = concat(au_fc_layers)
    net['local_fc2'] = DenseLayer(net['local_fc'], num_units=2048)

    net['local_fc_dp'] = DropoutLayer(net['local_fc2'], p=0.5)

    # net['fc_comb'] = concat([net['au_fc_layer'], net['local_fc_dp']])

    # net['fc_dense'] = DenseLayer(net['fc_comb'], num_units=1024)

    # net['fc_dense_dp'] = DropoutLayer(net['fc_dense'], p=0.3)

    net['real_out'] = DenseLayer(net['local_fc_dp'], num_units=12, nonlinearity=sigmoid)


    # net['final']=concat([net['pred_pos_layer'],net['output_layer']])

    return net
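
Each loop iteration carves one ROI's 512 channels out of the stacked input with a SliceLayer before upsampling and convolving. A shape sketch of a single iteration (illustrative only):

from lasagne.layers import InputLayer, SliceLayer, Upscale2DLayer, get_output_shape

l_in = InputLayer((None, 512 * 20, 3, 3))
roi0 = SliceLayer(l_in, indices=slice(0, 512), axis=1)  # (None, 512, 3, 3)
up0 = Upscale2DLayer(roi0, scale_factor=2)              # (None, 512, 6, 6)
print(get_output_shape(up0))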
Project: StockPredictor    Author: wallsbreaker    | project source | file source
def build_combination(input_var, output_nodes, input_size, stocks, period, feature_types):
    # Input layer
    input_layer = InputLayer(shape=(None, 1, input_size), input_var=input_var)
    assert input_size == stocks * period * feature_types
    input_layer = ReshapeLayer(input_layer, ([0], stocks, period, feature_types))

    # slice the input into per-stock, per-feature-type partitions
    stock_feature_type_layers = []
    for ix in range(stocks):
        stock_layer = SliceLayer(input_layer, indices=ix, axis=1)
        this_stock_feature_type_layers = []
        for rx in range(feature_types):
            this_stock_feature_type_layers.append(SliceLayer(stock_layer, indices=rx, axis=1))
        stock_feature_type_layers.append(this_stock_feature_type_layers)

    stock_networks = []
    for this_stock_feature_type_layers in stock_feature_type_layers:
        this_stock_networks = []
        for feature_type_layer in this_stock_feature_type_layers:
            tmp = DenseLayer(dropout(feature_type_layer, p=.2),
                num_units=10, nonlinearity=tanh)
            tmp = DenseLayer(dropout(tmp, p=.5), num_units=1, nonlinearity=tanh)
            this_stock_networks.append(tmp)

        this_stock_network = ConcatLayer(this_stock_networks)

        stock_network = DenseLayer(dropout(this_stock_network, p=.5),
                num_units=1, nonlinearity=tanh)

        stock_networks.append(stock_network)

    network = ConcatLayer(stock_networks)
    network = DenseLayer(dropout(network, p=.5),
                num_units=output_nodes, nonlinearity=sigmoid)

    return network, stock_networks
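
Because an integer indices drops the sliced axis, each SliceLayer shifts the remaining axes down by one: after the per-stock slice the tensor is (batch, period, feature_types), so the inner axis=1 slice addresses what was originally axis 2. A shape sketch with hypothetical sizes:

from lasagne.layers import InputLayer, SliceLayer, get_output_shape

l = InputLayer((None, 5, 30, 4))      # (batch, stocks, period, feature_types)
s = SliceLayer(l, indices=0, axis=1)  # integer index: the stock axis is dropped
print(get_output_shape(s))            # (None, 30, 4)
f = SliceLayer(s, indices=0, axis=1)  # now slices the former period axis
print(get_output_shape(f))            # (None, 4)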
Project: ip-avsr    Author: lzuwei    | project source | file source
def compile_delta_features():
    # create input
    input_var = T.tensor3('input', dtype='float32')
    win_var = T.iscalar('theta')
    weights, biases = autoencoder.load_dbn()

    '''
    activations = [sigmoid, sigmoid, sigmoid, linear, sigmoid, sigmoid, sigmoid, linear]
    layersizes = [2000, 1000, 500, 50, 500, 1000, 2000, 1200]
    ae = autoencoder.create_model(l_input, weights, biases, activations, layersizes)
    print_network(ae)
    reconstruct = las.layers.get_output(ae)
    reconstruction_fn = theano.function([input_var], reconstruct, allow_input_downcast=True)
    recon_img = reconstruction_fn(test_data_resized)
    visualize_reconstruction(test_data_resized[225:250], recon_img[225:250])
    '''
    l_input = InputLayer((None, None, 1200), input_var, name='input')

    symbolic_batchsize = l_input.input_var.shape[0]
    symbolic_seqlen = l_input.input_var.shape[1]
    en_activations = [sigmoid, sigmoid, sigmoid, linear]
    en_layersizes = [2000, 1000, 500, 50]

    l_reshape1 = ReshapeLayer(l_input, (-1, l_input.shape[-1]), name='reshape1')
    l_encoder = autoencoder.create_model(l_reshape1, weights[:4], biases[:4], en_activations, en_layersizes)
    encoder_len = las.layers.get_output_shape(l_encoder)[-1]
    l_reshape2 = ReshapeLayer(l_encoder, (symbolic_batchsize, symbolic_seqlen, encoder_len), name='reshape2')
    l_delta = DeltaLayer(l_reshape2, win_var, name='delta')
    l_slice = SliceLayer(l_delta, indices=slice(50, None), axis=-1, name='slice')  # extract the delta coefficients
    l_reshape3 = ReshapeLayer(l_slice, (-1, l_slice.output_shape[-1]), name='reshape3')
    print_network(l_reshape3)

    delta_features = las.layers.get_output(l_reshape3)
    delta_fn = theano.function([input_var, win_var], delta_features, allow_input_downcast=True)

    return delta_fn
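
A hedged sketch of invoking the compiled function (sizes and the window value are illustrative; DeltaLayer is a project-specific layer, assumed here to append delta coefficients after the 50 bottleneck dimensions, which is why the SliceLayer keeps indices 50 onward):

import numpy as np

delta_fn = compile_delta_features()
x = np.random.rand(2, 9, 1200).astype('float32')  # (batch, frames, raw dims)
deltas = delta_fn(x, 3)                           # theta = 3 delta window
print(deltas.shape)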
Project: ip-avsr    Author: lzuwei    | project source | file source
def create_model(input_shape, input_var, mask_shape, mask_var, lstm_size=250, output_classes=26,
                 w_init=las.init.Orthogonal()):
    gate_parameters = Gate(
        W_in=w_init, W_hid=w_init,
        b=las.init.Constant(0.))
    cell_parameters = Gate(
        W_in=w_init, W_hid=w_init,
        # Setting W_cell to None denotes that no cell connection will be used.
        W_cell=None, b=las.init.Constant(0.),
        # By convention, the cell nonlinearity is tanh in an LSTM.
        nonlinearity=tanh)

    l_in = InputLayer(input_shape, input_var, 'input')
    l_mask = InputLayer(mask_shape, mask_var, 'mask')

    f_lstm, b_lstm = create_blstm(l_in, l_mask, lstm_size, cell_parameters, gate_parameters, 'lstm')

    l_sum = ElemwiseSumLayer([f_lstm, b_lstm], name='sum')
    l_forward_slice1 = SliceLayer(l_sum, -1, 1, name='slice1')

    # Now, we can apply feed-forward layers as usual.
    # We want the network to predict a classification for the sequence,
    # so we use a dense layer with one unit per class.
    l_out = DenseLayer(
        l_forward_slice1, num_units=output_classes, nonlinearity=las.nonlinearities.softmax, name='output')

    return l_out
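
A usage sketch under assumed shapes (create_blstm is a helper from the same project that returns a forward and a backward LSTMLayer; all sizes below are illustrative):

import numpy as np
import theano
import theano.tensor as T
from lasagne.layers import get_output

input_var = T.tensor3('inputs')
mask_var = T.matrix('mask')
network = create_model((None, None, 50), input_var, (None, None), mask_var)
probs = get_output(network, deterministic=True)
predict_fn = theano.function([input_var, mask_var], probs)

x = np.random.rand(4, 20, 50).astype('float32')
m = np.ones((4, 20), dtype='float32')
print(predict_fn(x, m).shape)  # (4, 26): one softmax row per sequence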
Project: nn-patterns    Author: pikinder    | project source | file source
def _invert_Conv2DLayer(self,layer,feeder):
        # Warning: they are swapped here.
        feeder = self._put_rectifiers(feeder,layer)
        feeder = self._get_normalised_relevance_layer(layer,feeder)

        f_s = layer.filter_size
        if layer.pad == 'same':
            pad = 'same'
        elif layer.pad == 'valid' or layer.pad == (0, 0):
            pad = 'full'
        else:
            raise RuntimeError("Define your padding as full or same.")

        # By definition, flip_filters must be on for this to be a proper deconvolution.
        num_filters = L.get_output_shape(layer.input_layer)[1]
        if layer.stride == (4,4):
            # Todo: similar code gradient based explainers. Merge.
            feeder = L.Upscale2DLayer(feeder, layer.stride, mode='dilate')
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1,
                                         pad=pad,
                                         nonlinearity=None,
                                         b=None,
                                         flip_filters=True)
            conv_layer = output_layer
            tmp = L.SliceLayer(output_layer, slice(0, -3), axis=3)
            output_layer = L.SliceLayer(tmp, slice(0, -3), axis=2)
            output_layer.W = conv_layer.W
        else:
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1,
                                         pad=pad,
                                         nonlinearity=None,
                                         b=None,
                                         flip_filters=True)
        W = output_layer.W

        # Do the multiplication.
        x_layer = L.ReshapeLayer(layer.input_layer,
                                 (-1,)+L.get_output_shape(output_layer)[1:])
        output_layer = L.ElemwiseMergeLayer(incomings=[x_layer, output_layer],
                                            merge_function=T.mul)
        output_layer.W = W
        return output_layer
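
For stride (4, 4) this emulates a transposed convolution: Upscale2DLayer in 'dilate' mode inserts zeros between pixels, a stride-1 'full' convolution follows, and the two SliceLayers crop the three surplus rows and columns. A shape sketch under assumed AlexNet-like sizes (11x11 filters over a 227x227 input):

import lasagne.layers as L

feeder = L.InputLayer((None, 96, 55, 55))             # relevance at the conv output
up = L.Upscale2DLayer(feeder, (4, 4), mode='dilate')  # -> (None, 96, 220, 220)
deconv = L.Conv2DLayer(up, num_filters=3, filter_size=(11, 11),
                       stride=1, pad='full', nonlinearity=None,
                       b=None, flip_filters=True)     # -> (None, 3, 230, 230)
crop = L.SliceLayer(L.SliceLayer(deconv, slice(0, -3), axis=3),
                    slice(0, -3), axis=2)             # -> (None, 3, 227, 227)
print(L.get_output_shape(crop))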
Project: nn-patterns    Author: pikinder    | project source | file source
def _invert_Conv2DLayer(self, layer, feeder):
        def _check_padding_same():
            for s, p in zip(layer.filter_size, layer.pad):
                if s % 2 != 1:
                    return False
                elif s//2 != p:
                    return False
            return True

        # Warning: they are swapped here.
        feeder = self._put_rectifiers(feeder,layer)

        f_s = layer.filter_size
        if layer.pad == 'same' or _check_padding_same():
            pad = 'same'
        elif layer.pad == 'valid' or layer.pad == (0, 0):
            pad = 'full'
        else:
            raise RuntimeError("Define your padding as full or same.")

        # By definition, flip_filters must be on for this to be a proper deconvolution.

        num_filters = L.get_output_shape(layer.input_layer)[1]
        if layer.stride == (4,4):
            # Todo: clean this!
            print("Applying alexnet hack.")
            feeder = L.Upscale2DLayer(feeder, layer.stride, mode='dilate')
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1, pad=pad,
                                         nonlinearity=None, b=None,
                                         flip_filters=True)
            print("Applying alexnet hack part 2.")
            conv_layer = output_layer
            output_layer = L.SliceLayer(L.SliceLayer(output_layer,
                                                     slice(0,-3), axis=3),
                                        slice(0,-3), axis=2)
            output_layer.W = conv_layer.W
        elif layer.stride == (2,2):
            # Todo: clean this! Seems to be the same code as for AlexNet above.
            print("Applying GoogLeNet hack.")
            feeder = L.Upscale2DLayer(feeder, layer.stride, mode='dilate')
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1, pad=pad,
                                         nonlinearity=None, b=None,
                                         flip_filters=True)
        else:
            # Todo: clean this. Repetitions all over.
            output_layer = L.Conv2DLayer(feeder,
                                         num_filters=num_filters,
                                         filter_size=f_s,
                                         stride=1, pad=pad,
                                         nonlinearity=None, b=None,
                                         flip_filters=True)
        return output_layer
Project: ip-avsr    Author: lzuwei    | project source | file source
def create_model(dbn, input_shape, input_var, mask_shape, mask_var,
                 lstm_size=250, output_classes=26):

    dbn_layers = dbn.get_all_layers()
    weights = []
    biases = []
    weights.append(dbn_layers[1].W.astype('float32'))
    weights.append(dbn_layers[2].W.astype('float32'))
    weights.append(dbn_layers[3].W.astype('float32'))
    weights.append(dbn_layers[4].W.astype('float32'))
    biases.append(dbn_layers[1].b.astype('float32'))
    biases.append(dbn_layers[2].b.astype('float32'))
    biases.append(dbn_layers[3].b.astype('float32'))
    biases.append(dbn_layers[4].b.astype('float32'))

    gate_parameters = Gate(
        W_in=las.init.Orthogonal(), W_hid=las.init.Orthogonal(),
        b=las.init.Constant(0.))
    cell_parameters = Gate(
        W_in=las.init.Orthogonal(), W_hid=las.init.Orthogonal(),
        # Setting W_cell to None denotes that no cell connection will be used.
        W_cell=None, b=las.init.Constant(0.),
        # By convention, the cell nonlinearity is tanh in an LSTM.
        nonlinearity=tanh)

    l_in = InputLayer(input_shape, input_var, 'input')
    l_mask = InputLayer(mask_shape, mask_var, 'mask')

    symbolic_batchsize = l_in.input_var.shape[0]
    symbolic_seqlen = l_in.input_var.shape[1]

    l_reshape1 = ReshapeLayer(l_in, (-1, input_shape[-1]), name='reshape1')
    l_encoder = create_pretrained_encoder(weights, biases, l_reshape1)
    encoder_len = las.layers.get_output_shape(l_encoder)[-1]
    l_reshape2 = ReshapeLayer(l_encoder, (symbolic_batchsize, symbolic_seqlen, encoder_len), name='reshape2')
    # l_delta = DeltaLayer(l_reshape2, win, name='delta')

    # l_lstm = create_lstm(l_reshape2, l_mask, lstm_size, cell_parameters, gate_parameters, 'lstm1')
    l_lstm, l_lstm_back = create_blstm(l_reshape2, l_mask, lstm_size, cell_parameters, gate_parameters, 'lstm1')

    # We'll combine the forward and backward layer output by summing.
    # Merge layers take in lists of layers to merge as input.
    l_sum1 = ElemwiseSumLayer([l_lstm, l_lstm_back], name='sum1')

    l_forward_slice1 = SliceLayer(l_sum1, -1, 1, name='slice1')

    # Now, we can apply feed-forward layers as usual.
    # We want the network to predict a classification for the sequence,
    # so we use a dense layer with one unit per class.
    l_out = DenseLayer(
        l_forward_slice1, num_units=output_classes, nonlinearity=las.nonlinearities.softmax, name='output')

    return l_out
Project: ip-avsr    Author: lzuwei    | project source | file source
def create_model_using_pretrained_encoder(weights, biases, input_shape, input_var, mask_shape, mask_var,
                                          lstm_size=250, win=T.iscalar('theta'), output_classes=26,
                                          w_init_fn=las.init.Orthogonal(),
                                          use_peepholes=False, nonlinearities=rectify):
    gate_parameters = Gate(
        W_in=w_init_fn, W_hid=w_init_fn,
        b=las.init.Constant(0.))
    cell_parameters = Gate(
        W_in=w_init_fn, W_hid=w_init_fn,
        # Setting W_cell to None denotes that no cell connection will be used.
        W_cell=None, b=las.init.Constant(0.),
        # By convention, the cell nonlinearity is tanh in an LSTM.
        nonlinearity=tanh)

    l_in = InputLayer(input_shape, input_var, 'input')
    l_mask = InputLayer(mask_shape, mask_var, 'mask')

    symbolic_batchsize = l_in.input_var.shape[0]
    symbolic_seqlen = l_in.input_var.shape[1]

    l_reshape1 = ReshapeLayer(l_in, (-1, input_shape[-1]), name='reshape1')
    l_encoder = create_pretrained_encoder(l_reshape1, weights, biases,
                                          [2000, 1000, 500, 50],
                                          [nonlinearities, nonlinearities, nonlinearities, linear],
                                          ['fc1', 'fc2', 'fc3', 'bottleneck'])
    encoder_len = las.layers.get_output_shape(l_encoder)[-1]
    l_reshape2 = ReshapeLayer(l_encoder, (symbolic_batchsize, symbolic_seqlen, encoder_len), name='reshape2')
    l_delta = DeltaLayer(l_reshape2, win, name='delta')

    l_lstm, l_lstm_back = create_blstm(l_delta, l_mask, lstm_size, cell_parameters, gate_parameters, 'bstm1',
                                       use_peepholes)

    # We'll combine the forward and backward layer output by summing.
    # Merge layers take in lists of layers to merge as input.
    l_sum1 = ElemwiseSumLayer([l_lstm, l_lstm_back], name='sum1')

    l_forward_slice1 = SliceLayer(l_sum1, -1, 1, name='slice1')

    # Now, we can apply feed-forward layers as usual.
    # We want the network to predict a classification for the sequence,
    # so we use a dense layer with one unit per class.
    l_out = DenseLayer(
        l_forward_slice1, num_units=output_classes, nonlinearity=las.nonlinearities.softmax, name='output')

    return l_out