Python keras.layers Module: GRU Example Source Code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.GRU.

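Before the project snippets, here is a minimal sketch of the layer's basic usage, assuming Keras 2 with a TensorFlow backend; the shapes, unit count, and training settings are illustrative placeholders rather than values taken from any of the projects below:

import numpy as np
from keras.models import Sequential
from keras.layers import GRU, Dense

# Toy data: 100 sequences, each with 10 timesteps of 8 features.
x = np.random.rand(100, 10, 8)
y = np.random.rand(100, 1)

model = Sequential()
# GRU(units) consumes a (timesteps, features) sequence; with the default
# return_sequences=False it emits only the final hidden state.
model.add(GRU(32, input_shape=(10, 8)))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.fit(x, y, epochs=2, batch_size=16, verbose=0)
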
Project: keras    Author: GeekLiB    | Project Source | File Source
def test_temporal_regression():
    '''
    Predict float numbers (regression) based on sequences
    of float numbers of length 3 using a single layer of GRU units
    '''
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=500,
                                                         nb_test=400,
                                                         input_shape=(3, 5),
                                                         output_shape=(2,),
                                                         classification=False)
    model = Sequential()
    model.add(GRU(y_train.shape[-1],
                  input_shape=(X_train.shape[1], X_train.shape[2])))
    model.compile(loss='hinge', optimizer='adam')
    history = model.fit(X_train, y_train, nb_epoch=5, batch_size=16,
                        validation_data=(X_test, y_test), verbose=0)
    assert(history.history['val_loss'][-1] < 1.)
Project: deepcpg    Author: cangermueller    | Project Source | File Source
def __call__(self, inputs):
        x = self._merge_inputs(inputs)

        shape = getattr(x, '_keras_shape')
        replicate_model = self._replicate_model(kl.Input(shape=shape[2:]))
        x = kl.TimeDistributed(replicate_model)(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Bidirectional(kl.GRU(128, kernel_regularizer=kernel_regularizer,
                                    return_sequences=True),
                             merge_mode='concat')(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        gru = kl.GRU(256, kernel_regularizer=kernel_regularizer)
        x = kl.Bidirectional(gru)(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x)
Project: 3HAN    Author: ni9elf    | Project Source | File Source
def HAN1(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    #model = Sequential()
    wordInputs = Input(shape=(MAX_WORDS,), name='word1', dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='emb1')(wordInputs)  # Assuming all sentences have the same number of words; check input_length again.


    hij = Bidirectional(GRU(WORDGRU, name='gru1', return_sequences=True))(wordEmbedding)


    wordDrop = Dropout(DROPOUTPER, name='drop1')(hij)

    alpha_its, Si = AttentionLayer(name='att1')(wordDrop)   

    v6 = Dense(1, activation="sigmoid", name="dense")(Si)
    #model.add(Dense(1, activation="sigmoid", name="documentOut3"))
    model = Model(inputs=[wordInputs] , outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
Project: 3HAN    Author: ni9elf    | Project Source | File Source
def fGRU_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_SENTS+1, MAX_WORDS), name="wordInputs", dtype='float32')

    wordInp = Flatten()(wordInputs)

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInp) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)

    head = GlobalAveragePooling1D()(hij) 

    v6 = Dense(1, activation="sigmoid", name="dense")(head)

    model = Model(inputs=[wordInputs] , outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    return model
Project: reactionrnn    Author: minimaxir    | Project Source | File Source
def reactionrnn_model(weights_path, num_classes, maxlen=140):
    '''
    Builds the model architecture for textgenrnn and
    loads the pretrained weights for the model.
    '''

    input = Input(shape=(maxlen,), name='input')
    embedded = Embedding(num_classes, 100, input_length=maxlen,
                         name='embedding')(input)
    rnn = GRU(256, return_sequences=False, name='rnn')(embedded)
    output = Dense(5, name='output',
                   activation=lambda x: K.relu(x) / K.sum(K.relu(x),
                                                          axis=-1))(rnn)

    model = Model(inputs=[input], outputs=[output])
    model.load_weights(weights_path, by_name=True)
    model.compile(loss='mse', optimizer='nadam')
    return model
Project: aes-gated-word-char    Author: unkn0wnxx    | Project Source | File Source
def create_char_gru_model(self, emb_dim, word_maxlen, vocab_char_size,
                              char_maxlen):
        from keras.layers import GRU
        logger.info('Building character GRU model')
        input_char = Input(shape=(char_maxlen, ), name='input_char')
        char_emb = Embedding(
            vocab_char_size, emb_dim, mask_zero=True)(input_char)
        gru = GRU(
            300,
            return_sequences=True,
            dropout=self.dropout,
            recurrent_dropout=self.recurrent_dropout)(char_emb)
        dropped = Dropout(0.5)(gru)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_char, outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        logger.info('  Done')
        return model
Project: keras-surgeon    Author: BenWhetton    | Project Source | File Source
def recursive_test_helper(layer, channel_index):
    main_input = Input(shape=[32, 10])
    x = layer(main_input)
    x = GRU(4, return_sequences=False)(x)
    main_output = Dense(5)(x)
    model = Model(inputs=main_input, outputs=main_output)

    # Delete channels
    del_layer_index = 1
    next_layer_index = 2
    del_layer = model.layers[del_layer_index]
    new_model = operations.delete_channels(model, del_layer, channel_index)
    new_w = new_model.layers[next_layer_index].get_weights()

    # Calculate next layer's correct weights
    channel_count = getattr(del_layer, utils.get_channels_attr(del_layer))
    channel_index = [i % channel_count for i in channel_index]
    correct_w = model.layers[next_layer_index].get_weights()
    correct_w[0] = np.delete(correct_w[0], channel_index, axis=0)

    assert weights_equal(correct_w, new_w)
Project: coremltools    Author: apple    | Project Source | File Source
def test_initial_state_GRU(self):
        data = np.random.rand(1, 1, 2)

        model = keras.models.Sequential()
        model.add(keras.layers.GRU(5, input_shape=(1, 2), batch_input_shape=[1, 1, 2], stateful=True))
        model.get_layer(index=1).reset_states()

        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        keras_output_1 = model.predict(data)
        coreml_full_output_1 = coreml_model.predict({'data': data})
        coreml_output_1 = coreml_full_output_1['output']
        coreml_output_1 = np.expand_dims(coreml_output_1, 1)

        np.testing.assert_array_almost_equal(coreml_output_1.T, keras_output_1)

        hidden_state = (np.random.rand(1, 5))
        model.get_layer(index=1).reset_states(states=hidden_state)
        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        spec = coreml_model.get_spec()
        keras_output_2 = model.predict(data)
        coreml_full_output_2 = coreml_model.predict({'data': data, spec.description.input[1].name: hidden_state[0]})
        coreml_output_2 = coreml_full_output_2['output']
        coreml_output_2 = np.expand_dims(coreml_output_2, 1)
        np.testing.assert_array_almost_equal(coreml_output_2.T, keras_output_2)
Project: coremltools    Author: apple    | Project Source | File Source
def test_small_no_sequence_gru_random(self):
        np.random.seed(1988)
        input_dim = 10
        input_length = 1
        num_channels = 1

        # Define a model
        model = Sequential()
        model.add(GRU(num_channels, input_shape = (input_length, input_dim),
               recurrent_activation = 'sigmoid'))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2-0.1 for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output')
Project: coremltools    Author: apple    | Project Source | File Source
def test_medium_no_sequence_gru_random(self,
                                           model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)
        input_dim = 10
        input_length = 1
        num_channels = 10

        # Define a model
        model = Sequential()
        model.add(GRU(num_channels, input_shape = (input_length, input_dim), recurrent_activation = 'sigmoid'))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob='data', output_blob='output',
                               model_precision=model_precision)
Project: keras-customized    Author: ambrite    | Project Source | File Source
def test_temporal_regression():
    '''
    Predict float numbers (regression) based on sequences
    of float numbers of length 3 using a single layer of GRU units
    '''
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=500,
                                                         nb_test=400,
                                                         input_shape=(3, 5),
                                                         output_shape=(2,),
                                                         classification=False)
    model = Sequential()
    model.add(GRU(y_train.shape[-1],
                  input_shape=(X_train.shape[1], X_train.shape[2])))
    model.compile(loss='hinge', optimizer='adam')
    history = model.fit(X_train, y_train, nb_epoch=5, batch_size=16,
                        validation_data=(X_test, y_test), verbose=0)
    assert(history.history['val_loss'][-1] < 1.)
Project: botcycle    Author: D2KLab    | Project Source | File Source
def bidirectional_gru(len_output):
    # sequence_input is a matrix of glove vectors (one for each input word)
    sequence_input = Input(
        shape=(MAX_SEQUENCE_LENGTH, EMBEDDING_DIM,), dtype='float32')
    l_lstm = Bidirectional(GRU(100))(sequence_input)
    # TODO look call(input_at_t, states_at_t) method, returning (output_at_t, states_at_t_plus_1)
    # also look at switch(condition, then_expression, else_expression) for deciding when to feed previous state
    preds = Dense(len_output, activation='softmax')(l_lstm)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=[utils.f1_score, 'categorical_accuracy'])

    return model

# required, see values below
Project: keras    Author: NVIDIA    | Project Source | File Source
def test_temporal_regression():
    '''
    Predict float numbers (regression) based on sequences
    of float numbers of length 3 using a single layer of GRU units
    '''
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=500,
                                                         nb_test=400,
                                                         input_shape=(3, 5),
                                                         output_shape=(2,),
                                                         classification=False)
    model = Sequential()
    model.add(GRU(y_train.shape[-1],
                  input_shape=(X_train.shape[1], X_train.shape[2])))
    model.compile(loss='hinge', optimizer='adam')
    history = model.fit(X_train, y_train, nb_epoch=5, batch_size=16,
                        validation_data=(X_test, y_test), verbose=0)
    assert(history.history['val_loss'][-1] < 1.)
Project: CIKM_AnalytiCup_2017    Author: zxth93    | Project Source | File Source
def BiGRU(X_train, y_train, X_test, y_test, gru_units, dense_units, input_shape, \
           batch_size, epochs, drop_out, patience):

    model = Sequential()

    reg = L1L2(l1=0.2, l2=0.2)

    model.add(Bidirectional(GRU(units = gru_units, dropout= drop_out, activation='relu', recurrent_regularizer = reg,
                                return_sequences = True),
                                input_shape = input_shape,
                                merge_mode="concat"))

    model.add(BatchNormalization())

    model.add(TimeDistributed(Dense(dense_units, activation='relu')))
    model.add(BatchNormalization())

    model.add(Bidirectional(GRU(units = gru_units, dropout= drop_out, activation='relu', recurrent_regularizer=reg,
                                    return_sequences = True),
                             merge_mode="concat"))

    model.add(BatchNormalization())

    model.add(Dense(units=1))

    model.add(GlobalAveragePooling1D())

    print(model.summary())

    early_stopping = EarlyStopping(monitor="val_loss", patience = patience)

    model.compile(loss='mse', optimizer= 'adam')

    history_callback = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,\
              verbose=2, callbacks=[early_stopping], validation_data=[X_test, y_test], shuffle = True)

    return model, history_callback
Project: temporal-attention    Author: dosht    | Project Source | File Source
def main():
    print("\n\nLoading data...")
    data_dir = "/data/translate"
    vocab_size = 20000
    en, fr = prepare_date(data_dir, vocab_size)

    print("\n\nbuilding the model...")
    embedding_size = 64
    hidden_size = 32
    model = Sequential()
    model.add(Embedding(en.max_features, embedding_size, input_length=en.max_length, mask_zero=True))
    model.add(Bidirectional(GRU(hidden_size), merge_mode='sum'))
    model.add(RepeatVector(fr.max_length))
    model.add(GRU(embedding_size))
    model.add(Dense(fr.max_length, activation="softmax"))
    model.compile('rmsprop', 'mse')
    print(model.get_config())

    print("\n\nFitting the model...")
    model.fit(en.examples, fr.examples)

    print("\n\nEvaluation...")
    #TODO
Project: image_caption    Author: MaticsL    | Project Source | File Source
def image_caption_model(vocab_size=2500, embedding_matrix=None, lang_dim=100,
            max_caplen=28, img_dim=2048, clipnorm=1):
    print('generating vocab_history model v5')
    # text: current word
    lang_input = Input(shape=(1,))
    img_input = Input(shape=(img_dim,))
    seq_input = Input(shape=(max_caplen,))
    vhist_input = Input(shape=(vocab_size,))

    if embedding_matrix is not None:
        x = Embedding(output_dim=lang_dim, input_dim=vocab_size, init='glorot_uniform', input_length=1, weights=[embedding_matrix])(lang_input)
    else:
        x = Embedding(output_dim=lang_dim, input_dim=vocab_size, init='glorot_uniform', input_length=1)(lang_input)

    lang_embed = Reshape((lang_dim,))(x)
    lang_embed = merge([lang_embed, seq_input], mode='concat', concat_axis=-1)
    lang_embed = Dense(lang_dim)(lang_embed)
    lang_embed = Dropout(0.25)(lang_embed)

    merge_layer = merge([img_input, lang_embed, vhist_input], mode='concat', concat_axis=-1)
    merge_layer = Reshape((1, lang_dim+img_dim+vocab_size))(merge_layer)

    gru_1 = GRU(img_dim)(merge_layer)
    gru_1 = Dropout(0.25)(gru_1)
    gru_1 = Dense(img_dim)(gru_1)
    gru_1 = BatchNormalization()(gru_1)
    gru_1 = Activation('softmax')(gru_1)

    attention_1 = merge([img_input, gru_1], mode='mul', concat_axis=-1)
    attention_1 = merge([attention_1, lang_embed, vhist_input], mode='concat', concat_axis=-1)
    attention_1 = Reshape((1, lang_dim + img_dim + vocab_size))(attention_1)
    gru_2 = GRU(1024)(attention_1)
    gru_2 = Dropout(0.25)(gru_2)
    gru_2 = Dense(vocab_size)(gru_2)
    gru_2 = BatchNormalization()(gru_2)
    out = Activation('softmax')(gru_2)

    model = Model(input=[img_input, lang_input, seq_input, vhist_input], output=out)
    model.compile(loss='categorical_crossentropy', optimizer=RMSprop(lr=0.0001, clipnorm=1.))
    return model
Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project Source | File Source
def setUp(self):
        super(TestRNNEncoderWithGRUClass, self).setUp()
        self.model = self.create_model(GRU)
Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project Source | File Source
def setUp(self):
        super(TestRNNEncoderWithRNNCellGRUClass, self).setUp()
        self.model = self.create_model(GRU)
Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project Source | File Source
def setUp(self):
        super(TestRNNCellWithGRUClass, self).setUp()
        self.model = self.create_model(GRU)
Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project Source | File Source
def setUp(self):
        super(TestRNNDecoderWithGRUClass, self).setUp()
        self.model = self.create_model(GRU)
Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project Source | File Source
def setUp(self):
        super(TestRNNDecoderWithRNNCellGRUClass, self).setUp()
        self.model = self.create_model(GRU)
Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project Source | File Source
def setUp(self):
        super(TestBidirectionalRNNEncoderWithRNNCellGRUClass, self).setUp()
        self.model = self.create_model(GRU)
Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project Source | File Source
def setUp(self):
        super(TestRNNDecoderWithDecodingSizeGRUClass, self).setUp()
        self.model = self.create_model(GRU)
        self.max_length = self.decoding_length
Project: 3HAN    Author: ni9elf    | Project Source | File Source
def GRUf(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_SENTS+1, MAX_WORDS), name="wordInputs", dtype='float32')

    wordInp = Flatten()(wordInputs)

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInp) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=False), name='gru1')(wordEmbedding)

    v6 = Dense(1, activation="sigmoid", name="dense")(hij)

    model = Model(inputs=[wordInputs] , outputs=[v6])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    return model
Project: 3HAN    Author: ni9elf    | Project Source | File Source
def fhan2_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)


    Si = GlobalAveragePooling1D()(hij)

    wordEncoder = Model(wordInputs, Si)

    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    #sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(docInputs) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    Vb = GlobalAveragePooling1D()(hi)

    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'glorot_uniform', name="dense")(Vb)
    model = Model(inputs=[docInputs] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
Project: 3HAN    Author: ni9elf    | Project Source | File Source
def han2(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):

    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    #print 'in han2 max-nb-words'
    #print MAX_NB_WORDS

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)

    alpha_its, Si = AttentionLayer(name='att1')(hij)

    #wordDrop = Dropout(DROPOUTPER, name='wordDrop')(Si)

    wordEncoder = Model(wordInputs, Si)
    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(sentenceMasking) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    alpha_s, Vb = AttentionLayer(name='att2')(hi)

    #sentDrop = Dropout(DROPOUTPER, name='sentDrop')(Vb)

    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'he_normal', name="dense")(Vb)
    model = Model(inputs=[docInputs] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

    return model, wordEncoder
Project: keras    Author: GeekLiB    | Project Source | File Source
def create_temporal_sequential_model():
    model = Sequential()
    model.add(GRU(32, input_shape=(timesteps, input_dim), return_sequences=True))
    model.add(TimeDistributedDense(nb_classes))
    model.add(Activation('softmax'))
    return model
Project: keras    Author: GeekLiB    | Project Source | File Source
def test_temporal_classification():
    '''
    Classify temporal sequences of float numbers
    of length 3 into 2 classes using
    single layer of GRU units and softmax applied
    to the last activations of the units
    '''
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=500,
                                                         nb_test=500,
                                                         input_shape=(3, 5),
                                                         classification=True,
                                                         nb_class=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential()
    model.add(GRU(y_train.shape[-1],
                  input_shape=(X_train.shape[1], X_train.shape[2]),
                  activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adagrad',
                  metrics=['accuracy'])
    history = model.fit(X_train, y_train, nb_epoch=20, batch_size=32,
                        validation_data=(X_test, y_test),
                        verbose=0)
    assert(history.history['val_acc'][-1] >= 0.8)
Project: rnn-playlist-prediction    Author: burakkose    | Project Source | File Source
def process(self):
        self.model.add(Embedding(len(self.song_hash) + 1, 50, mask_zero=True))
        self.model.add(SpatialDropout1D(rate=0.20))
        self.model.add(GRU(128))
        self.model.add(Dense(len(self.song_hash) + 1, activation=self.activation))

        self.model.compile(optimizer=self.optimizer,
                           loss=self.loss,
                           metrics=self.metrics)

        self.model.fit(self.x_train, self.y_train, epochs=100, batch_size=512, validation_split=0.1,
                       callbacks=self.callbacks)

        return self
Project: mycroft    Author: wpm    | Project Source | File Source
def __init__(self, training, sequence_length=None, vocabulary_size=None,
                 train_embeddings=SequentialTextEmbeddingClassifier.TRAIN_EMBEDDINGS,
                 language_model=LANGUAGE_MODEL, rnn_type=RNN_TYPE, rnn_units=RNN_UNITS, bidirectional=BIDIRECTIONAL,
                 dropout=DROPOUT, learning_rate=LEARNING_RATE):
        from keras.models import Sequential
        from keras.layers import Bidirectional, Dense, Dropout, GRU, LSTM
        from keras.optimizers import Adam

        label_names, sequence_length, vocabulary_size = self.parameters_from_training(sequence_length, vocabulary_size,
                                                                                      training, language_model)
        embedder = TextSequenceEmbedder(vocabulary_size, sequence_length, language_model)

        model = Sequential()
        model.add(self.embedding_layer(embedder, sequence_length, train_embeddings, mask_zero=True, name="embedding"))
        rnn_class = {"lstm": LSTM, "gru": GRU}[rnn_type]
        for i, units in enumerate(rnn_units, 1):
            name = "rnn-%d" % i
            return_sequences = i < len(rnn_units)
            if bidirectional:
                rnn = Bidirectional(rnn_class(units, return_sequences=return_sequences), name=name)
            else:
                rnn = rnn_class(units, return_sequences=return_sequences, name=name)
            model.add(rnn)
            model.add(Dropout(dropout, name="dropout-%d" % i))
        model.add(Dense(len(label_names), activation="softmax", name="softmax"))
        optimizer = Adam(lr=learning_rate)
        model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"])

        self.rnn_units = rnn_units
        self.bidirectional = bidirectional
        self.dropout = dropout
        super().__init__(model, embedder, label_names)
Project: merlin    Author: CSTR-Edinburgh    | Project Source | File Source
def __init__(self, n_in, hidden_layer_size, n_out, hidden_layer_type, output_type='linear', dropout_rate=0.0, loss_function='mse', optimizer='adam'):
        """ This function initialises a neural network

        :param n_in: Dimensionality of input features
        :param hidden_layer_size: The layer size for each hidden layer
        :param n_out: Dimensionality of output features
        :param hidden_layer_type: the activation types of each hidden layers, e.g., TANH, LSTM, GRU, BLSTM
        :param output_type: the activation type of the output layer; defaults to 'linear' (linear regression).
        :param dropout_rate: probability of dropout, a float number between 0 and 1.
        :type n_in: Integer
        :type hidden_layer_size: A list of integers
        :type n_out: Integer
        """

        self.n_in  = int(n_in)
        self.n_out = int(n_out)

        self.n_layers = len(hidden_layer_size)

        self.hidden_layer_size = hidden_layer_size
        self.hidden_layer_type = hidden_layer_type

        assert len(self.hidden_layer_size) == len(self.hidden_layer_type)

        self.output_type   = output_type
        self.dropout_rate  = dropout_rate
        self.loss_function = loss_function
        self.optimizer     = optimizer

        # create model
        self.model = Sequential()
Project: medaka    Author: nanoporetech    | Project Source | File Source
def build_model(chunk_size, feature_len, num_classes, gru_size=128):
    """Builds a bidirectional GRU model"""

    model = Sequential()
    # Bidirectional wrapper takes a copy of the first argument and reverses
    #   the direction. Weights are independent between components.
    model.add(Bidirectional(
        GRU(gru_size, activation='tanh', return_sequences=True, name='gru1'),
        input_shape=(chunk_size, feature_len))
    )
    model.add(Bidirectional(
        GRU(gru_size, activation='tanh', return_sequences=True, name='gru2'))
    )
    model.add(Dense(num_classes, activation='softmax', name='classify'))
    return model
Project: keras_detect_tool_wear    Author: kidozh    | Project Source | File Source
def build_stateful_lstm_model(batch_size,time_step,input_dim,output_dim,dropout=0.2,rnn_layer_num=2,hidden_dim=128,hidden_num=0,rnn_type='LSTM'):

    model = Sequential()
    # may use BN for accelerating speed
    # add first LSTM
    if rnn_type == 'LSTM':
        rnn_cell = LSTM
    elif rnn_type == 'GRU':
        rnn_cell = GRU
    elif rnn_type == 'SimpleRNN':
        rnn_cell = SimpleRNN
    else:
        raise ValueError('Option rnn_type could only be configured as LSTM, GRU or SimpleRNN')
    model.add(rnn_cell(hidden_dim,return_sequences=True,batch_input_shape=(batch_size,time_step,input_dim)))

    for _ in range(rnn_layer_num-2):
        model.add(rnn_cell(hidden_dim, return_sequences=True))
        # prevent overfitting
        model.add(Dropout(dropout))

    model.add(rnn_cell(hidden_dim, return_sequences=False))

    # add hidden layer

    for _ in range(hidden_num):
        model.add(Dense(hidden_dim))

    model.add(Dropout(dropout))

    model.add(Dense(output_dim))

    rmsprop = RMSprop(lr=0.01)
    adam = Adam(lr=0.01)


    model.compile(loss='mse',metrics=['acc'],optimizer=rmsprop)

    return model
Project: smiles-neural-network    Author: PMitura    | Project Source | File Source
def setModelConsumeLess(model, mode='cpu'):
    for i in xrange(len(model.layers)):
        if type(model.layers[i]) is GRU or type(model.layers[i]) is LSTM:
            model.layers[i].consume_less = mode
Project: Fabrik    Author: Cloud-CV    | Project Source | File Source
def test_keras_import(self):
        model = Sequential()
        model.add(LSTM(64, return_sequences=True, input_shape=(10, 64)))
        model.add(SimpleRNN(32, return_sequences=True))
        model.add(GRU(10, kernel_regularizer=regularizers.l2(0.01),
                      bias_regularizer=regularizers.l2(0.01), recurrent_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                      bias_constraint='max_norm', recurrent_constraint='max_norm'))
        model.build()
        json_string = Model.to_json(model)
        with open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'w') as out:
            json.dump(json.loads(json_string), out, indent=4)
        sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'r')
        response = self.client.post(reverse('keras-import'), {'file': sample_file})
        response = json.loads(response.content)
        layerId = sorted(response['net'].keys())
        self.assertEqual(response['result'], 'success')
        self.assertGreaterEqual(len(response['net'][layerId[1]]['params']), 7)
        self.assertGreaterEqual(len(response['net'][layerId[3]]['params']), 7)
        self.assertGreaterEqual(len(response['net'][layerId[6]]['params']), 7)


# ********** Embedding Layers **********
Project: Fabrik    Author: Cloud-CV    | Project Source | File Source
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input2'], 'l1': net['GRU']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = recurrent(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'GRU')


# ********** Embed Layer Test *********
Project: CaptchaVariLength    Author: Slyne    | Project Source | File Source
def create_simpleCnnRnn(image_shape, max_caption_len,vocab_size):
    image_model = Sequential()
    # image_shape : C,W,H
    # input: 100x100 images with 3 channels -> (3, 100, 100) tensors.
    # this applies 32 convolution filters of size 3x3 each.
    image_model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=image_shape))
    image_model.add(BatchNormalization())
    image_model.add(Activation('relu'))
    image_model.add(Convolution2D(32, 3, 3))
    image_model.add(BatchNormalization())
    image_model.add(Activation('relu'))
    image_model.add(MaxPooling2D(pool_size=(2, 2)))
    image_model.add(Dropout(0.25))
    image_model.add(Convolution2D(64, 3, 3, border_mode='valid'))
    image_model.add(BatchNormalization())
    image_model.add(Activation('relu'))
    image_model.add(Convolution2D(64, 3, 3))
    image_model.add(BatchNormalization())
    image_model.add(Activation('relu'))
    image_model.add(MaxPooling2D(pool_size=(2, 2)))
    image_model.add(Dropout(0.25))
    image_model.add(Flatten())
    # Note: Keras does automatic shape inference.
    image_model.add(Dense(128))
    image_model.add(RepeatVector(max_caption_len))
    image_model.add(Bidirectional(GRU(output_dim=128, return_sequences=True)))
    #image_model.add(GRU(output_dim=128, return_sequences=True))
    image_model.add(TimeDistributed(Dense(vocab_size)))
    image_model.add(Activation('softmax'))
    return image_model
Project: Neural-Chatbot    Author: saurabhmathur96    | Project Source | File Source
def __init__(self, layer, attention_vec, attn_activation='tanh', single_attention_param=False, **kwargs):
        assert isinstance(layer, LSTM) or isinstance(layer, GRU)
        super(AttentionWrapper, self).__init__(layer, **kwargs)
        self.supports_masking = True
        self.attention_vec = attention_vec
        self.attn_activation = activations.get(attn_activation)
        self.single_attention_param = single_attention_param
Project: Neural-Chatbot    Author: saurabhmathur96    | Project Source | File Source
def Encoder(hidden_size, activation=None, return_sequences=True, bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _encoder(x):
            if bidirectional:
                branch_1 = GRU(int(hidden_size/2), activation='linear',
                               return_sequences=return_sequences, go_backwards=False)(x)
                branch_2 = GRU(int(hidden_size/2), activation='linear',
                               return_sequences=return_sequences, go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = GRU(hidden_size, activation='linear',
                        return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    else:
        def _encoder(x):
            if bidirectional:
                branch_1 = LSTM(int(hidden_size/2), activation='linear',
                                return_sequences=return_sequences, go_backwards=False)(x)
                branch_2 = LSTM(int(hidden_size/2), activation='linear',
                                return_sequences=return_sequences, go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = LSTM(hidden_size, activation='linear',
                         return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    return _encoder
Project: Neural-Chatbot    Author: saurabhmathur96    | Project Source | File Source
def AttentionDecoder(hidden_size, activation=None, return_sequences=True, bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(GRU(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                go_backwards=False), attention, single_attention_param=True)(x)
                branch_2 = AttentionWrapper(GRU(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                go_backwards=True), attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                return activation(x)
            else:
                x = AttentionWrapper(GRU(hidden_size, activation='linear',
                                         return_sequences=return_sequences), attention, single_attention_param=True)(x)
                x = activation(x)
                return x
    else:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(LSTM(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                 go_backwards=False), attention, single_attention_param=True)(x)
                branch_2 = AttentionWrapper(LSTM(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                 go_backwards=True), attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = AttentionWrapper(LSTM(hidden_size, activation='linear', return_sequences=return_sequences),
                                     attention, single_attention_param=True)(x)
                x = activation(x)
                return x

    return _decoder
Project: Neural-Chatbot    Author: saurabhmathur96    | Project Source | File Source
def Decoder(hidden_size, activation=None, return_sequences=True, bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _decoder(x):
            if bidirectional:
                x = Bidirectional(
                    GRU(int(hidden_size/2), activation='linear', return_sequences=return_sequences))(x)
                x = activation(x)
                return x
            else:
                x = GRU(hidden_size, activation='linear',
                        return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    else:
        def _decoder(x):
            if bidirectional:
                x = Bidirectional(
                    LSTM(int(hidden_size/2), activation='linear', return_sequences=return_sequences))(x)
                x = activation(x)
                return x
            else:
                x = LSTM(hidden_size, activation='linear',
                         return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    return _decoder
Project: aes-gated-word-char    Author: unkn0wnxx    | Project Source | File Source
def create_word_gru_model(self, emb_dim, emb_path, vocab_word,
                              vocab_word_size, word_maxlen):
        from keras.layers import GRU
        logger.info('Building word GRU model')
        input_word = Input(shape=(word_maxlen, ), name='input_word')
        word_emb = Embedding(
            vocab_word_size, emb_dim, mask_zero=True,
            name='word_emb')(input_word)
        gru = GRU(
            300,
            return_sequences=True,
            dropout=self.dropout,
            recurrent_dropout=self.recurrent_dropout)(word_emb)
        dropped = Dropout(0.5)(gru)
        mot = MeanOverTime(mask_zero=True)(dropped)
        densed = Dense(self.num_outputs, name='dense')(mot)
        output = Activation('sigmoid')(densed)
        model = Model(inputs=input_word, outputs=output)
        model.get_layer('dense').bias.set_value(self.bias)
        if emb_path:
            from emb_reader import EmbReader as EmbReader
            logger.info('Initializing lookup table')
            emb_reader = EmbReader(emb_path, emb_dim=emb_dim)
            model.get_layer('word_emb').embeddings.set_value(
                emb_reader.get_emb_matrix_given_vocab(
                    vocab_word,
                    model.get_layer('word_emb').embeddings.get_value()))
        logger.info('  Done')
        return model
Project: keras-surgeon    Author: BenWhetton    | Project Source | File Source
def test_delete_channels_gru(channel_index):
    layer = GRU(9, return_sequences=True)
    recursive_test_helper(layer, channel_index)
Project: coremltools    Author: apple    | Project Source | File Source
def test_gru(self):
        """
        Test the conversion of a GRU layer.
        """
        from keras.layers import GRU

        # Create a simple Keras model
        model = Sequential()
        model.add(GRU(32, input_dim=32, input_length=10))

        input_names = ['input']
        output_names = ['output']
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField('neuralNetwork'))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names) + 1)
        self.assertEquals(input_names[0], spec.description.input[0].name)

        self.assertEquals(32, spec.description.input[1].type.multiArrayType.shape[0])

        self.assertEquals(len(spec.description.output), len(output_names) + 1)
        self.assertEquals(output_names[0], spec.description.output[0].name)
        self.assertEquals(32, spec.description.output[0].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.output[1].type.multiArrayType.shape[0])

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.gru)
        self.assertEquals(len(layer_0.input), 2)
        self.assertEquals(len(layer_0.output), 2)
Project: coremltools    Author: apple    | Project Source | File Source
def test_gru(self):
        """
        Test the conversion of a GRU layer.
        """
        from keras.layers import GRU

        # Create a simple Keras model
        model = Sequential()
        model.add(GRU(32, input_shape=(32,10)))

        input_names = ['input']
        output_names = ['output']
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField('neuralNetwork'))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names) + 1)
        self.assertEquals(input_names[0], spec.description.input[0].name)

        self.assertEquals(32, spec.description.input[1].type.multiArrayType.shape[0])

        self.assertEquals(len(spec.description.output), len(output_names) + 1)
        self.assertEquals(output_names[0], spec.description.output[0].name)
        self.assertEquals(32, spec.description.output[0].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.output[1].type.multiArrayType.shape[0])

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.gru)
        self.assertEquals(len(layer_0.input), 2)
        self.assertEquals(len(layer_0.output), 2)
Project: coremltools    Author: apple    | Project Source | File Source
def test_SimpleGRU(self):
        params = dict(
            input_dims=[1, 4, 8], go_backwards=False, activation='tanh',
            stateful=False, unroll=False, return_sequences=False, output_dim=4
        ),
        model = Sequential()
        if keras.__version__[:2] == '2.':
             model.add(GRU(units=params[0]['output_dim'],
                           input_shape=(params[0]['input_dims'][1],params[0]['input_dims'][2]),
                           activation=params[0]['activation'],
                           recurrent_activation='sigmoid',
                           return_sequences=params[0]['return_sequences'],
                           go_backwards=params[0]['go_backwards'],
                           unroll=True,
                           ))
        else:
            model.add(GRU(output_dim=params[0]['output_dim'],
                          input_length=params[0]['input_dims'][1],
                          input_dim=params[0]['input_dims'][2],
                          activation=params[0]['activation'],
                          inner_activation='sigmoid',
                          return_sequences=params[0]['return_sequences'],
                          go_backwards=params[0]['go_backwards'],
                          unroll=True,
                          ))
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        relative_error, keras_preds, coreml_preds = simple_model_eval(params, model)
        for i in range(len(relative_error)):
            self.assertLessEqual(relative_error[i], 0.01)
Project: coremltools    Author: apple    | Project Source | File Source
def test_gru_seq(self):
        np.random.seed(1988)
        input_dim = 11
        input_length = 5

        # Define a model
        model = Sequential()
        model.add(GRU(20, input_shape = (input_length, input_dim), return_sequences=False))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2-0.1 for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', output_blob = 'output')
Project: coremltools    Author: apple    | Project Source | File Source
def test_gru_seq_backwards(self, model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)
        input_dim = 11
        input_length = 5

        # Define a model
        model = Sequential()
        model.add(GRU(20, input_shape = (input_length, input_dim), return_sequences=False, go_backwards=True))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2-0.1 for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob='data', output_blob='output',
                               model_precision=model_precision)
Project: coremltools    Author: apple    | Project Source | File Source
def test_intermediate_rcnn_1d(self):

        x_in = Input(shape=(10,2))
        # Conv block 1
        x = Conv1D(3, 3, padding='same', name='interm_rcnn_conv1')(x_in)
        x = BatchNormalization(axis=-1, name='interm_rcnn_bn1')(x)
        x = Activation('elu')(x)
        x = MaxPooling1D(pool_size=2, name='interm_rcnn_pool1')(x)

        out1 = x # out1.shape = (5,3)
        x = GRU(6, name='gru1')(x)
        out2 = x
        model = Model(x_in, [out1,out2])
        # model = Model(x_in, [out2])
        self._test_keras_model(model, mode='random_zero_mean', delta=1e-2)