Python keras.layers.recurrent module: LSTM example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.layers.recurrent.LSTM.
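
Note that these snippets span several Keras generations: the Keras 1.x names output_dim, inner_activation, and nb_epoch seen below correspond to units, recurrent_activation, and epochs in Keras 2. As a quick orientation before the project excerpts, here is a minimal, self-contained sketch (Keras 2 spelling; the data shapes and hyperparameters are illustrative assumptions only) of the kind of stacked-LSTM regressor many of them build:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers.recurrent import LSTM

# Toy data: 100 sequences, 10 timesteps, 4 features each -> 1 target value.
X = np.random.random((100, 10, 4))
y = np.random.random((100, 1))

model = Sequential()
# In Keras 1 this first layer would read LSTM(output_dim=32, ...).
model.add(LSTM(32, input_shape=(10, 4), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(16, return_sequences=False))  # final LSTM emits one vector per sequence
model.add(Dropout(0.2))
model.add(Dense(1, activation='linear'))

model.compile(loss='mse', optimizer='rmsprop')
model.fit(X, y, epochs=1, batch_size=32, verbose=0)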

Project: nli_generation    Author: jstarc    | Project source | File source
def attention_bnorm_model(hidden_size, glove):

    prem_input = Input(shape=(None,), dtype='int32')
    hypo_input = Input(shape=(None,), dtype='int32')

    prem_embeddings = make_fixed_embeddings(glove, None)(prem_input)
    hypo_embeddings = make_fixed_embeddings(glove, None)(hypo_input)
    premise_layer = LSTM(output_dim=hidden_size, return_sequences=True,
                            inner_activation='sigmoid')(prem_embeddings)
    premise_bn = BatchNormalization()(premise_layer)
    hypo_layer = LSTM(output_dim=hidden_size, return_sequences=True,
                            inner_activation='sigmoid')(hypo_embeddings)
    hypo_bn = BatchNormalization()(hypo_layer)
    attention = LstmAttentionLayer(output_dim=hidden_size)([hypo_bn, premise_bn])
    att_bn = BatchNormalization()(attention)
    final_dense = Dense(3, activation='softmax')(att_bn)

    model = Model(input=[prem_input, hypo_input], output=final_dense)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Project: deeppavlov    Author: deepmipt    | Project source | File source
def lstm_word_model(self):
        embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))

        output = Bidirectional(LSTM(self.opt['units_lstm'], activation='tanh',
                                      kernel_regularizer=l2(self.opt['regul_coef_lstm']),
                                      dropout=self.opt['dropout_rate']))(embed_input)

        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(self.opt['dense_dim'], activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(1, activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=embed_input, outputs=act_output)
        return model
Project: DeepLearning    Author: STHSF    | Project source | File source
def build_model(layers):
    model = Sequential()

    model.add(LSTM(
        input_dim=layers[0],
        output_dim=layers[1],
        return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(
        layers[2],
        return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(
        output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: keras    Author: GeekLiB    | Project source | File source
def test_masking_layer():
    ''' This test is based on a previously failing issue reported here:
    https://github.com/fchollet/keras/issues/1567

    '''
    I = np.random.random((6, 3, 4))
    V = np.abs(np.random.random((6, 3, 5)))
    V /= V.sum(axis=-1, keepdims=True)

    model = Sequential()
    model.add(Masking(input_shape=(3, 4)))
    model.add(recurrent.LSTM(output_dim=5, return_sequences=True, unroll=False))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(I, V, nb_epoch=1, batch_size=100, verbose=1)

    model = Sequential()
    model.add(Masking(input_shape=(3, 4)))
    model.add(recurrent.LSTM(output_dim=5, return_sequences=True, unroll=True))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(I, V, nb_epoch=1, batch_size=100, verbose=1)
Project: copper_price_forecast    Author: liyinwei    | Project source | File source
def build_model():
    """
    Build the model.
    """
    model = Sequential()

    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: copper_price_forecast    Author: liyinwei    | Project source | File source
def build_model():
    """
    Build the model.
    """
    model = Sequential()

    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: copper_price_forecast    Author: liyinwei    | Project source | File source
def build_model(layers):
    """
    Build the model.
    """
    model = Sequential()

    model.add(LSTM(units=layers[1], input_shape=(layers[1], layers[0]), return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(layers[2], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=layers[3]))
    model.add(Activation("tanh"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: Quantrade    Author: quant-trade    | Project source | File source
def __init__(self, sizes,
                 cell       = RNNCell.LSTM,
                 dropout    = 0.2,
                 activation = 'linear',
                 loss       = 'mse',
                 optimizer  = 'rmsprop'): #beta_1
        self.model = Sequential()

        self.model.add(cell(
            input_dim        = sizes[0],
            output_dim       = sizes[1],
            return_sequences = True
        ))

        for i in range(2, len(sizes) - 1):
            self.model.add(cell(sizes[i], return_sequences = False))
            self.model.add(Dropout(dropout))

        self.model.add(Dense(output_dim = sizes[-1]))
        self.model.add(Activation(activation))

        self.model.compile(loss=loss, optimizer=optimizer)
Project: keras_detect_tool_wear    Author: kidozh    | Project source | File source
def build_simple_rnn_model(timestep,input_dim,output_dim,dropout=0.4,lr=0.001):
    input = Input((timestep,input_dim))
    # LSTM, Single
    output = LSTM(50,return_sequences=False)(input)
    # for _ in range(1):
    #     output = LSTM(32,return_sequences=True)(output)
    # output = LSTM(50,return_sequences=False)(output)
    output = Dropout(dropout)(output)
    output = Dense(output_dim)(output)

    model = Model(inputs=input, outputs=output)

    optimizer = Adam(lr=lr)

    model.compile(loss='mae',optimizer=optimizer,metrics=['mse'])

    return model
Project: nli_generation    Author: jstarc    | Project source | File source
def attention_model(hidden_size, glove):

    prem_input = Input(shape=(None,), dtype='int32')
    hypo_input = Input(shape=(None,), dtype='int32')

    prem_embeddings = make_fixed_embeddings(glove, None)(prem_input)
    hypo_embeddings = make_fixed_embeddings(glove, None)(hypo_input)
    premise_layer = LSTM(output_dim=hidden_size, return_sequences=True, 
                            inner_activation='sigmoid')(prem_embeddings)
    hypo_layer = LSTM(output_dim=hidden_size, return_sequences=True, 
                            inner_activation='sigmoid')(hypo_embeddings)    
    attention = LstmAttentionLayer(output_dim=hidden_size)([hypo_layer, premise_layer])
    final_dense = Dense(3, activation='softmax')(attention)

    model = Model(input=[prem_input, hypo_input], output=final_dense)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Project: Deep-Forecast    Author: amirstar    | Project source | File source
def buildModelLSTM_3(self):
        model = Sequential()

        layers = [self.inOutVecDim, 57, 57 * 2, 32, self.inOutVecDim]
        model.add(LSTM(input_dim=layers[0], output_dim=layers[1],
            return_sequences=False))

        model.add(Dense(
            output_dim=layers[4]))

        model.add(Activation(self.activation))

        optimizer = keras.optimizers.RMSprop(lr=0.001)
        model.compile(loss="mae", optimizer=optimizer)

        return model
Project: Deep-Forecast    Author: amirstar    | Project source | File source
def buildModelLSTM_4(self):
        model = Sequential()

        layers = [self.inOutVecDim, 57, 57 * 2, 57, self.inOutVecDim]
        model.add(LSTM(input_dim=layers[0], output_dim=layers[1],
            return_sequences=True))

        model.add(LSTM(layers[2],
            return_sequences=False))

        model.add(Dense(output_dim=layers[4]))

        model.add(Activation(self.activation))

        optimizer = keras.optimizers.RMSprop(lr=0.001)
        model.compile(loss="mae", optimizer=optimizer)

        return model
Project: Deep-Forecast    Author: amirstar    | Project source | File source
def run(self):
        #  training
        xTrain, yTrain = self.loadData_1()
        print ' Training LSTM 1 ...'
        self.lstmModels[0] = self.trainLSTM(xTrain, yTrain, 1)

        for modelInd in range(1,6):
            xTrain, yTrain = self.loadData(xTrain, yTrain, self.lstmModels[modelInd-1])
            print ' Training LSTM %s ...' % (modelInd+1)
            self.lstmModels[modelInd] = self.trainLSTM(xTrain, yTrain, modelInd+1)

        # testing
        print '...... TESTING  ...'
        self.test()

        self.drawGraphAllStations()
Project: algotrading    Author: alifanov    | Project source | File source
def get_model():
    model = Sequential()
    model.add(LSTM(
        32,
        input_shape=(look_back, 1),
        return_sequences=True
    ))
    model.add(Dropout(0.2))
    model.add(LSTM(
        64,
        return_sequences=False
    ))
    model.add(Dropout(0.2))
    model.add(Dense(1))
    model.add(Activation('linear'))
    model.compile(loss='mse', optimizer='adam')
    return model
Project: keras-customized    Author: ambrite    | Project source | File source
def test_regularizer(layer_class):
    layer = layer_class(output_dim, return_sequences=False, weights=None,
                        batch_input_shape=(nb_samples, timesteps, embedding_dim),
                        W_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        U_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        b_regularizer='l2')
    shape = (nb_samples, timesteps, embedding_dim)
    layer.build(shape)
    output = layer(K.variable(np.ones(shape)))
    K.eval(output)
    if layer_class == recurrent.SimpleRNN:
        assert len(layer.losses) == 3
    if layer_class == recurrent.GRU:
        assert len(layer.losses) == 9
    if layer_class == recurrent.LSTM:
        assert len(layer.losses) == 12
Project: keras-customized    Author: ambrite    | Project source | File source
def test_masking_layer():
    ''' This test is based on a previously failing issue reported here:
    https://github.com/fchollet/keras/issues/1567

    '''
    I = np.random.random((6, 3, 4))
    V = np.abs(np.random.random((6, 3, 5)))
    V /= V.sum(axis=-1, keepdims=True)

    model = Sequential()
    model.add(Masking(input_shape=(3, 4)))
    model.add(recurrent.LSTM(output_dim=5, return_sequences=True, unroll=False))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(I, V, nb_epoch=1, batch_size=100, verbose=1)

    model = Sequential()
    model.add(Masking(input_shape=(3, 4)))
    model.add(recurrent.LSTM(output_dim=5, return_sequences=True, unroll=True))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(I, V, nb_epoch=1, batch_size=100, verbose=1)
Project: deep_learning    Author: Vict0rSch    | Project source | File source
def build_model():
    model = Sequential()
    layers = [1, 50, 100, 1]

    model.add(LSTM(
        layers[1],
        input_shape=(None, layers[0]),
        return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(
        layers[2],
        return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(
        layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print "Compilation Time : ", time.time() - start
    return model
Project: mlprojects-py    Author: srinathperera    | Project source | File source
def build_model():
    model = Sequential()
    layers = [2, 50, 100, 1]

    model.add(LSTM(
        input_dim=layers[0],
        output_dim=layers[1],
        return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(
        layers[2],
        return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(
        output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print "Compilation Time : ", time.time() - start
    return model
Project: rnn_agreement    Author: TalLinzen    | Project source | File source
def __init__(self, filename=None, serialization_dir=None,
                 batch_size=16, embedding_size=50, 
                 maxlen=50, prop_train=0.9, rnn_output_size=50,
                 mode='infreq_pos', vocab_file=filenames.vocab_file,
                 rnn_class=LSTM, equalize_classes=False, criterion=None,
                 verbose=1):
        '''
        filename: TSV file with positive examples, or None if unserializing
        criterion: dependencies that don't meet this criterion are excluded
            (set to None to keep all dependencies)
        verbose: passed to Keras (0 = no, 1 = progress bar, 2 = line per epoch)
        '''
        self.filename = filename
        self.vocab_file = vocab_file
        self.batch_size = batch_size
        self.embedding_size = embedding_size
        self.prop_train = prop_train
        self.mode = mode
        self.rnn_output_size = rnn_output_size
        self.rnn_class = rnn_class
        self.maxlen = maxlen
        self.equalize_classes = equalize_classes
        self.criterion = (lambda x: True) if criterion is None else criterion
        self.verbose = verbose
        self.set_serialization_dir(serialization_dir)
Project: StockRecommendSystem    Author: doncat99    | Project source | File source
def LSTM(self, argsDict):
        self.paras.batch_size             = argsDict["batch_size"]
        self.paras.model['dropout']       = argsDict['dropout']
        self.paras.model['activation']    = argsDict["activation"]
        self.paras.model['optimizer']     = argsDict["optimizer"]
        self.paras.model['learning_rate'] = argsDict["learning_rate"]

        print(self.paras.batch_size, self.paras.model['dropout'], self.paras.model['activation'], self.paras.model['optimizer'], self.paras.model['learning_rate'])

        model = self.lstm_model()
        model.fit(self.train_x, self.train_y,
              batch_size=self.paras.batch_size,
              epochs=self.paras.epoch,
              verbose=0,
              callbacks=[EarlyStopping(monitor='loss', patience=5)]
              )

        score, mse = model.evaluate(self.test_x, self.test_y, verbose=0)
        y_pred = model.predict(self.test_x)
        reca = Recall_s(self.test_y, y_pred)
        return -reca
Project: StockRecommendSystem    Author: doncat99    | Project source | File source
def plot_training_curve(self, history):
        #         %matplotlib inline
        #         %pylab inline
        #         pylab.rcParams['figure.figsize'] = (15, 9)   # Change the size of plots

        # LSTM training
        f, ax = plt.subplots()
        ax.plot(history.history['loss'])
        #ax.plot(history.history['val_loss'])
        ax.set_title('loss function')
        ax.set_ylabel('mse')
        ax.set_xlabel('epoch')
        #ax.legend(['loss', 'val_loss'], loc='upper right')
        ax.legend(['loss'], loc='upper right')
        plt.show()
        if self.paras.save == True:
            w = csv.writer(open(self.paras.save_folder + 'training_curve_model.txt', 'w'))
            for key, val in history.history.items():
                w.writerow([key, val])
            for key, val in history.params.items():
                w.writerow([key, val])

# Classification
Project: keras    Author: NVIDIA    | Project source | File source
def test_regularizer(layer_class):
    layer = layer_class(output_dim, return_sequences=False, weights=None,
                        batch_input_shape=(nb_samples, timesteps, embedding_dim),
                        W_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        U_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        b_regularizer='l2')
    shape = (nb_samples, timesteps, embedding_dim)
    layer.build(shape)
    output = layer(K.variable(np.ones(shape)))
    K.eval(output)
    if layer_class == recurrent.SimpleRNN:
        assert len(layer.losses) == 3
    if layer_class == recurrent.GRU:
        assert len(layer.losses) == 9
    if layer_class == recurrent.LSTM:
        assert len(layer.losses) == 12
Project: keras    Author: NVIDIA    | Project source | File source
def test_masking_layer():
    ''' This test is based on a previously failing issue reported here:
    https://github.com/fchollet/keras/issues/1567

    '''
    I = np.random.random((6, 3, 4))
    V = np.abs(np.random.random((6, 3, 5)))
    V /= V.sum(axis=-1, keepdims=True)

    model = Sequential()
    model.add(Masking(input_shape=(3, 4)))
    model.add(recurrent.LSTM(output_dim=5, return_sequences=True, unroll=False))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(I, V, nb_epoch=1, batch_size=100, verbose=1)

    model = Sequential()
    model.add(Masking(input_shape=(3, 4)))
    model.add(recurrent.LSTM(output_dim=5, return_sequences=True, unroll=True))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(I, V, nb_epoch=1, batch_size=100, verbose=1)
Project: seq2graph    Author: masterkeywikz    | Project source | File source
def __init__(self, output_dim, hidden_dim, output_length, depth=1, dropout=0.25, **kwargs):
        super(SimpleSeq2seq, self).__init__()
        if type(depth) not in [list, tuple]:
            depth = (depth, depth)
        self.encoder = LSTM(hidden_dim, **kwargs)
        self.decoder = LSTM(hidden_dim, return_sequences=True, **kwargs)
        for i in range(1, depth[0]):
            self.add(LSTM(hidden_dim, return_sequences=True, **kwargs))
            self.add(Dropout(dropout))
        self.add(self.encoder)
        self.add(Dropout(dropout))
        self.add(RepeatVector(output_length))
        self.add(self.decoder)
        for i in range(1, depth[1]):
            self.add(LSTM(hidden_dim, return_sequences=True, **kwargs))
            self.add(Dropout(dropout))
        #if depth[1] > 1:
        self.add(TimeDistributedDense(output_dim, activation='softmax'))
Project: patriots    Author: wdxtub    | Project source | File source
def train_lstm(dict,x,y,xt,yt):
  model = Sequential()
  model.add(Embedding(len(dict)+1, 256, input_length=maxlen))
  model.add(LSTM(output_dim=128, activation='sigmoid', inner_activation='hard_sigmoid'))
  model.add(Dropout(0.5))
  model.add(Dense(1))
  # model.add(Dense(input_dim = 32, output_dim = 1))
  model.add(Activation('sigmoid'))
  print('Compiling model...')
  #model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")
  model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
  print ("??????")
  model.fit(x, y, batch_size=lstm_batch_size, epochs=lstm_epochs, verbose=0)
  print ("??????")
  print ("????")
  yaml_string = model.to_yaml()
  with open(modeldir + '/lstm.yml', 'w') as outfile:
    outfile.write( yaml.dump(yaml_string, default_flow_style=True) )
  model.save_weights(modeldir + '/lstm.h5')
  print ("?????")
  score = model.evaluate(xt, yt, verbose=0)
  print ("???:",score[1])
  return model
Project: bulbea    Author: achillesrasquinha    | Project source | File source
def __init__(self, sizes,
                 cell       = RNNCell.LSTM,
                 dropout    = 0.2,
                 activation = 'linear',
                 loss       = 'mse',
                 optimizer  = 'rmsprop'):
        self.model = Sequential()
        self.model.add(cell(
            input_dim        = sizes[0],
            output_dim       = sizes[1],
            return_sequences = True
        ))

        for i in range(2, len(sizes) - 1):
            self.model.add(cell(sizes[i], return_sequences = False))
            self.model.add(core.Dropout(dropout))

        self.model.add(core.Dense(output_dim = sizes[-1]))
        self.model.add(core.Activation(activation))

        self.model.compile(loss = loss, optimizer = optimizer)
Project: Recurrent_Neural_Net_Meetup    Author: GalvanizeOpenSource    | Project source | File source
def build_model(layers):
    model = Sequential()

    model.add(LSTM(
        input_shape=(layers[1], layers[0]),
        output_dim=layers[1],
        return_sequences=True))
    model.add(Activation("tanh"))
    model.add(Dropout(0.2))

    model.add(LSTM(
        layers[2],
        return_sequences=False))
    #model.add(Activation("tanh"))
    model.add(Dropout(0.2))

    model.add(Dense(
        output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: Stocky    Author: leavenstee    | Project source | File source
def build_model(layers):
    model = Sequential()

    model.add(LSTM(
        input_dim=layers[0],
        output_dim=layers[1],
        return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(
        layers[2],
        return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(
        output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print "Compilation Time : ", time.time() - start
    return model
Project: lstm-talk    Author: marionleborgne    | Project source | File source
def create_model(sequence_length, layers):
    model = Sequential()
    model.add(LSTM(units=layers['hidden1'],
                   input_shape=(sequence_length - 1, layers['input']),
                   return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(units=layers['hidden2'], return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(units=layers['hidden3'], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=layers['output']))
    model.add(Activation("linear"))

    model.compile(loss="mse", optimizer="rmsprop")
    return model
Project: DeepSpell_temp    Author: surmenok    | Project source | File source
def generate_model(output_len, chars=None):
    """Generate the model"""
    print('Build model...')
    chars = chars or CHARS
    model = Sequential()
    # "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE
    # note: in a situation where your input sequences have a variable length,
    # use input_shape=(None, nb_feature).
    for layer_number in range(INPUT_LAYERS):
        model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)), init=INITIALIZATION,
                                 return_sequences=layer_number + 1 < INPUT_LAYERS))
        model.add(Dropout(AMOUNT_OF_DROPOUT))
    # For the decoder's input, we repeat the encoded input for each time step
    model.add(RepeatVector(output_len))
    # The decoder RNN could be multiple layers stacked or a single layer
    for _ in range(OUTPUT_LAYERS):
        model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=INITIALIZATION))
        model.add(Dropout(AMOUNT_OF_DROPOUT))

    # For each of step of the output sequence, decide which character should be chosen
    model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Project: stock-predict-by-RNN-LSTM    Author: blockchain99    | Project source | File source
def __prepare_model(self):
        print('Build model...')
        model = Sequential()
        model.add(LSTM(output_dim=self.hidden_cnt,
                       input_dim=self.input_dim,
                       input_length=self.input_length,
                       return_sequences=False))
#         model.add(Dropout(0.5))
# my addition: dropout raised to ~0.9375
        model.add(Dropout(0.93755))
        model.add(Dense(self.hidden_cnt, activation='tanh'))
        model.add(Dense(self.output_dim, activation='softmax'))

        print('Compile model...')
#         sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
#         model.compile(loss='categorical_crossentropy', optimizer=sgd)
#         return model
# my addition: use the Adagrad optimizer
        adagrad = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer=adagrad)
        return model
Project: shell-complete    Author: src-d    | Project source | File source
def setUp(self):
        self.log = logging.getLogger("test")
        self.log.setLevel(logging.INFO)
        self.path_to_vocab = "shcomplete/tests/data/vocab_0.01.txt"
        self.chars = get_chars(self.path_to_vocab)
        self.path_to_corpus = "shcomplete/tests/data/corpus.txt"
        self.models_directory = "shcomplete/tests/data"
        self.args = argparse.Namespace(vocabulary=self.path_to_vocab, corpus=self.path_to_corpus,
                                       models_directory=self.models_directory, max_cmd_len=40,
                                       input_layers=1, hidden_layers=4, output_layers=1,
                                       dropout=0.2, batch_size=32, level_noise=0.4,
                                       nb_predictions=2, nb_epochs=1, steps_per_epoch=64,
                                       from_model=None, checkpoint=2, optimizer="adam",
                                       cell_type=recurrent.LSTM)
        self.model = generate_model(self.args, nb_features=len(self.chars)+1,
                                    input_length=self.args.max_cmd_len,
                                    nb_repeats=self.args.max_cmd_len)
Project: keras_weight_animator    Author: brannondorsey    | Project source | File source
def get_model(batch_size, window_size):

    model = Sequential()
    model.add(LSTM(64, 
                   batch_input_shape=(batch_size, window_size, 3), 
                   return_sequences=False, stateful=False))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Dense(6))
    model.add(Activation('softmax'))

    model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
    return model
Project: wort2vek    Author: HPI-DeepLearning    | Project source | File source
def model(self, n_inputs, n_hidden):
        model = Sequential()
        model.add(self.embedding_layer(n_inputs))
        model.add(LSTM(n_hidden))
        model.add(Dense(self.nb_words(), activation='softmax'))
        return model
Project: Kutils    Author: ishank26    | Project source | File source
def my_model(X_train, y_train, X_test, y_test):
    ############ model params ################
    line_length = 248  # seq size
    train_char = 58
    hidden_neurons = 512  # hidden neurons
    batch = 64  # batch_size
    no_epochs = 3
    ################### Model ################

    ######### begin model ########
    model = Sequential()
    # layer 1
    model.add(LSTM(hidden_neurons, return_sequences=True,
                   input_shape=(line_length, train_char)))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # layer 2
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # layer 3
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # fc layer
    model.add(TimeDistributed(Dense(train_char, activation='softmax')))
    model.load_weights("weights/model_maha1_noep50_batch64_seq_248.hdf5")
    ########################################################################
    checkpoint = ModelCheckpoint("weights/hypmodel2_maha1_noep{0}_batch{1}_seq_{2}.hdf5".format(
        no_epochs, batch, line_length), monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='min')

    initlr = 0.00114
    adagrad = Adagrad(lr=initlr, epsilon=1e-08,
                      clipvalue={{choice([0, 1, 2, 3, 4, 5, 6, 7])}})
    model.compile(optimizer=adagrad,
                  loss='categorical_crossentropy', metrics=['accuracy'])
    history = History()
    # fit model
    model.fit(X_train, y_train, batch_size=batch, nb_epoch=no_epochs,
              validation_split=0.2, callbacks=[history, checkpoint])

    score, acc = model.evaluate(X_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Project: Kutils    Author: ishank26    | Project source | File source
def my_model(dropout):
    ############ model params ################
    line_length = 248  # seq size
    train_char = 58
    hidden_neurons = 512  # hidden neurons
    batch = 64  # batch_size
    no_epochs = 5
    ################### Model ################
    model = Sequential()
    # layer 1
    model.add(LSTM(hidden_neurons, return_sequences=True,
                   input_shape=(line_length, train_char)))
    model.add(Dropout(dropout))
    # layer 2
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout(dropout))
    # layer 3
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout(dropout))
    model.add(Reshape((248, 512)))
    # fc layer
    model.add(TimeDistributed(Dense(58, activation='softmax')))
    # model.load_weights("weights/model_maha1_noep50_batch64_seq_248.hdf5")
    # model.layers.pop()
    # model.layers.pop()
    # model.add(Dropout(dropout))
    #model.add(TimeDistributed(Dense(train_char, activation='softmax')))
    initlr = 0.00114
    adagrad = Adagrad(lr=initlr, epsilon=1e-08)
    model.compile(optimizer=adagrad,
                  loss='categorical_crossentropy', metrics=['accuracy'])
    ###load weights####
    return model
Project: keras    Author: GeekLiB    | Project source | File source
def rnn_test(f):
    """
    All the recurrent layers share the same interface,
    so we can run through them with a single function.
    """
    f = keras_test(f)
    return pytest.mark.parametrize("layer_class", [
        recurrent.SimpleRNN,
        recurrent.GRU,
        recurrent.LSTM
    ])(f)
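
For illustration, a hypothetical test built on this decorator might look as follows; the test name, layer size, and input shape are assumptions, not code from the repository:

from keras.models import Sequential

@rnn_test
def test_output_shape(layer_class):
    # pytest runs this body once per layer_class: SimpleRNN, GRU, LSTM.
    model = Sequential()
    model.add(layer_class(8, return_sequences=True, input_shape=(5, 3)))
    # Every recurrent layer maps (timesteps, features) to (timesteps, units).
    assert model.output_shape == (None, 5, 8)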
Project: five-video-classification-methods    Author: harvitronix    | Project source | File source
def lstm(self):
        """Build a simple LSTM network. We pass the extracted features from
        our CNN to this model predominantly."""
        # Model.
        model = Sequential()
        model.add(LSTM(2048, return_sequences=False,
                       input_shape=self.input_shape,
                       dropout=0.5))
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
Project: autolipsync    Author: evgenijkatunov    | Project source | File source
def init(self):
        self.model = Sequential()
        self.model.add(Bidirectional(LSTM(126, return_sequences=True), 'sum',
                                     input_shape=(self._max_frames, self._features_count)))
        self.model.add(Dropout(0.5))
        self.model.add(TimeDistributed(Dense(units=self._phonemes_count, activation='softmax')))
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='rmsprop',
                           metrics=[metrics.categorical_accuracy])
Project: keras_detect_tool_wear    Author: kidozh    | Project source | File source
def build_model(timestep,input_dim,output_dim,dropout=0.5,recurrent_layers_num=4,cnn_layers_num=6,lr=0.001):
    inp = Input(shape=(timestep,input_dim))
    output = TimeDistributed(Masking(mask_value=0))(inp)
    #output = inp
    output = Conv1D(128, 1)(output)
    output = BatchNormalization()(output)
    output = Activation('relu')(output)

    output = first_block(output, (64, 128), dropout=dropout)


    output = Dropout(dropout)(output)
    for _ in range(cnn_layers_num):
        output = repeated_block(output, (64, 128), dropout=dropout)

    output = Flatten()(output)
    #output = LSTM(128, return_sequences=False)(output)

    output = BatchNormalization()(output)
    output = Activation('relu')(output)
    output = Dense(output_dim)(output)


    model = Model(inp,output)

    optimizer = Adam(lr=lr)

    model.compile(optimizer,'mse',['mae'])
    return model
Project: keras_detect_tool_wear    Author: kidozh    | Project source | File source
def build_main_residual_network_with_lstm(batch_size,
                                time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                rnn_layer_num = 2,
                                dropout=0.3):
    inp = Input(shape=(time_step,input_dim))

    # add mask for filter invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)

    # add LSTM module
    for _ in range(rnn_layer_num):
        out = LSTM(128,return_sequences=True)(out)



    out = Conv1D(128,5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
Project: keras_detect_tool_wear    Author: kidozh    | Project source | File source
def build_main_residual_network_with_lstm(batch_size,
                                time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                rnn_layer_num = 2,
                                dropout=0.3):
    inp = Input(shape=(time_step,input_dim))

    # add mask for filter invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)

    # add LSTM module
    for _ in range(rnn_layer_num):
        out = LSTM(128,return_sequences=True)(out)



    out = Conv1D(128,5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
Project: visualqa    Author: AndreiBarsan    | Project source | File source
def process_input(self, question):
        """
        Processing the input is model specific. While an easy
        model would just sum up embedding vectors, more advanced
        models might use an LSTM layer. This method is called in training
        and testing and should return the input vector for the neural
        network for a given question.
        :param question: a list of unicode objects
        :return: the input vector for the language model
        """
        pass
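
For contrast with the abstract method above, here is a hedged sketch of the LSTM-based variant the docstring alludes to. Everything in it (vocabulary size, dimensions, the word_index mapping, the padding scheme) is an invented assumption for illustration, not code from the visualqa project:

import numpy as np
from keras.models import Sequential
from keras.layers import Embedding
from keras.layers.recurrent import LSTM

VOCAB_SIZE, EMBED_DIM, ENCODING_DIM, MAX_Q_LEN = 1000, 50, 64, 20  # assumed sizes

encoder = Sequential()
encoder.add(Embedding(VOCAB_SIZE, EMBED_DIM, input_length=MAX_Q_LEN))
encoder.add(LSTM(ENCODING_DIM))  # one fixed-size vector per question

def process_input_lstm(question, word_index):
    """question: list of unicode tokens; word_index: assumed token -> id dict."""
    ids = [word_index.get(w, 0) for w in question][:MAX_Q_LEN]
    ids += [0] * (MAX_Q_LEN - len(ids))           # pad to a fixed length
    return encoder.predict(np.array([ids]))[0]    # shape: (ENCODING_DIM,)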
Project: RNN_LSTM_Stock_Model    Author: als5ev    | Project source | File source
def build_model(layers):
    d = 0.2
    model = Sequential()
    model.add(LSTM(128, input_shape=(layers[1], layers[0]), return_sequences=True))
    model.add(Dropout(d))
    model.add(LSTM(64, input_shape=(layers[1], layers[0]), return_sequences=False))
    model.add(Dropout(d))
    model.add(Dense(16, kernel_initializer="uniform"))
    model.add(LeakyReLU(alpha=0.3))
    model.add(Dense(layers[2], kernel_initializer="uniform"))
    model.add(LeakyReLU(alpha=0.3))
    #adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(loss='mse', optimizer="rmsprop", metrics=['accuracy'])
    return model
Project: hyperas    Author: maxpumperla    | Project source | File source
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    model.fit(X_train, y_train,
              batch_size={{choice([32, 64, 128])}},
              nb_epoch=1,
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, verbose=0)

    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Project: hyperas    Author: maxpumperla    | Project source | File source
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Project: deepjazz    Author: jisungk    | Project source | File source
def build_model(corpus, val_indices, max_len, N_epochs=128):
    # number of different values or words in corpus
    N_values = len(set(corpus))

    # cut the corpus into semi-redundant sequences of max_len values
    step = 3
    sentences = []
    next_values = []
    for i in range(0, len(corpus) - max_len, step):
        sentences.append(corpus[i: i + max_len])
        next_values.append(corpus[i + max_len])
    print('nb sequences:', len(sentences))

    # transform data into binary matrices
    X = np.zeros((len(sentences), max_len, N_values), dtype=np.bool)
    y = np.zeros((len(sentences), N_values), dtype=np.bool)
    for i, sentence in enumerate(sentences):
        for t, val in enumerate(sentence):
            X[i, t, val_indices[val]] = 1
        y[i, val_indices[next_values[i]]] = 1

    # build a 2 stacked LSTM
    model = Sequential()
    model.add(LSTM(128, return_sequences=True, input_shape=(max_len, N_values)))
    model.add(Dropout(0.2))
    model.add(LSTM(128, return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(N_values))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X, y, batch_size=128, nb_epoch=N_epochs)

    return model
Project: triplets-extraction    Author: zsctju    | Project source | File source
def creat_binary_tag_LSTM(sourcevocabsize, targetvocabsize, source_W, input_seq_lenth, output_seq_lenth,
                          hidden_dim, emd_dim, loss='categorical_crossentropy', optimizer='rmsprop'):
    encoder_a = Sequential()
    encoder_b = Sequential()
    encoder_c = Sequential()
    l_A_embedding = Embedding(input_dim=sourcevocabsize+1,
                        output_dim=emd_dim,
                        input_length=input_seq_lenth,
                        mask_zero=True,
                        weights=[source_W])
    encoder_a.add(l_A_embedding)
    encoder_a.add(Dropout(0.3))
    encoder_b.add(l_A_embedding)
    encoder_b.add(Dropout(0.3))
    encoder_c.add(l_A_embedding)

    Model = Sequential()

    encoder_a.add(LSTM(hidden_dim,return_sequences=True))
    encoder_b.add(LSTM(hidden_dim,return_sequences=True,go_backwards=True))
    encoder_rb = Sequential()
    encoder_rb.add(ReverseLayer2(encoder_b))
    encoder_ab=Merge(( encoder_a,encoder_rb),mode='concat')
    Model.add(encoder_ab)

    decodelayer=LSTMDecoder_tag(hidden_dim=hidden_dim, output_dim=hidden_dim
                                         , input_length=input_seq_lenth,
                                        output_length=output_seq_lenth,
                                        state_input=False,
                                         return_sequences=True)
    Model.add(decodelayer)
    Model.add(TimeDistributedDense(targetvocabsize+1))
    Model.add(Activation('softmax'))
    Model.compile(loss=loss, optimizer=optimizer)
    return Model
Project: sciDT    Author: edvisees    | Project source | File source
def fit_model(self, X, Y, use_attention, att_context, bidirectional):
    print >>sys.stderr, "Input shape:", X.shape, Y.shape
    early_stopping = EarlyStopping(patience = 2)
    num_classes = len(self.label_ind)
    if bidirectional:
      tagger = Graph()
      tagger.add_input(name='input', input_shape=X.shape[1:])
      if use_attention:
        tagger.add_node(TensorAttention(X.shape[1:], context=att_context), name='attention', input='input')
        lstm_input_node = 'attention'
      else:
        lstm_input_node = 'input'
      tagger.add_node(LSTM(X.shape[-1]/2, return_sequences=True), name='forward', input=lstm_input_node)
      tagger.add_node(LSTM(X.shape[-1]/2, return_sequences=True, go_backwards=True), name='backward', input=lstm_input_node)
      tagger.add_node(TimeDistributedDense(num_classes, activation='softmax'), name='softmax', inputs=['forward', 'backward'], merge_mode='concat', concat_axis=-1)
      tagger.add_output(name='output', input='softmax')
      tagger.summary()
      tagger.compile('adam', {'output':'categorical_crossentropy'})
      tagger.fit({'input':X, 'output':Y}, validation_split=0.1, callbacks=[early_stopping], show_accuracy=True, nb_epoch=100, batch_size=10)
    else:
      tagger = Sequential()
      word_proj_dim = 50
      if use_attention:
        _, input_len, timesteps, input_dim = X.shape
        tagger.add(HigherOrderTimeDistributedDense(input_dim=input_dim, output_dim=word_proj_dim))
        att_input_shape = (input_len, timesteps, word_proj_dim)
        print >>sys.stderr, "Attention input shape:", att_input_shape
        tagger.add(Dropout(0.5))
        tagger.add(TensorAttention(att_input_shape, context=att_context))
      else:
        _, input_len, input_dim = X.shape
        tagger.add(TimeDistributedDense(input_dim=input_dim, input_length=input_len, output_dim=word_proj_dim))
      tagger.add(LSTM(input_dim=word_proj_dim, output_dim=word_proj_dim, input_length=input_len, return_sequences=True))
      tagger.add(TimeDistributedDense(num_classes, activation='softmax'))
      tagger.summary()
      tagger.compile(loss='categorical_crossentropy', optimizer='adam')
      tagger.fit(X, Y, validation_split=0.1, callbacks=[early_stopping], show_accuracy=True, batch_size=10)

    return tagger
Project: nli_generation    Author: jstarc    | Project source | File source
    def call(self, x, mask=None):
        # Ignore the incoming mask and run the parent LSTM unmasked.
        return super(LSTM, self).call(x, None)