Python keras.layers module: Masking() code examples

We extracted the following 25 code examples from open-source Python projects to illustrate how to use keras.layers.Masking().
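
Before the project excerpts, here is a minimal, self-contained sketch (not taken from any of the projects below; the shapes and values are illustrative assumptions) of what Masking does: any timestep whose features all equal mask_value is skipped by downstream mask-aware layers such as LSTM.

import numpy as np
from keras.layers import Input, Masking, LSTM
from keras.models import Model

inputs = Input(shape=(4, 2))                # 4 timesteps, 2 features
masked = Masking(mask_value=0.0)(inputs)    # timesteps that are all 0.0 are masked
outputs = LSTM(8)(masked)                   # the LSTM skips masked timesteps
model = Model(inputs, outputs)
model.compile(optimizer='sgd', loss='mse')

x = np.random.random((1, 4, 2))
x[:, 2:, :] = 0.0                           # pad the last two timesteps
print(model.predict(x).shape)               # (1, 8)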

Project: yoctol-keras-layer-zoo | Author: Yoctol
def create_model(self, rnn_layer):
        inputs = Input(shape=(self.max_length, self.feature_size))
        masked_inputs = Masking(0.0)(inputs)
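        # RNNCell below is a custom wrapper layer from yoctol-keras-layer-zoo, not a built-in Keras layer.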
        outputs = RNNCell(
            recurrent_layer=rnn_layer(
                self.hidden_size,
                return_sequences=True
            ),
            dense_layer=Dense(
                units=self.encoding_size
            ),
            dense_dropout=0.1
        )(masked_inputs)
        model = Model(inputs, outputs)
        model.compile('sgd', 'mean_squared_error')
        return model
Project: yoctol-keras-layer-zoo | Author: Yoctol
def create_model(self, rnn_layer):
        inputs = Input(shape=(self.max_length, self.feature_size))
        masked_inputs = Masking(0.0)(inputs)
        outputs = BidirectionalRNNEncoder(
            RNNCell(
                rnn_layer(
                    self.hidden_units,
                ),
                Dense(
                    self.cell_units
                ),
                dense_dropout=0.1
            )
        )(masked_inputs)
        model = Model(inputs, outputs)
        model.compile('sgd', 'mean_squared_error')
        return model
Project: wtte-rnn | Author: ragulpr
def model_masking(discrete_time, init_alpha, max_beta):
    model = Sequential()
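    # mask_value, n_timesteps, n_features and lr are defined elsewhere in the original file.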

    model.add(Masking(mask_value=mask_value,
                      input_shape=(n_timesteps, n_features)))
    model.add(TimeDistributed(Dense(2)))
    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete', reduce_loss=False).loss_function
    else:
        loss = wtte.loss(kind='continuous', reduce_loss=False).loss_function

    model.compile(loss=loss, optimizer=RMSprop(
        lr=lr), sample_weight_mode='temporal')
    return model
Project: yoctol-keras-layer-zoo | Author: Yoctol
def create_model(self, rnn_layer):
        inputs = Input(shape=(self.max_length, self.feature_size))
        masked_inputs = Masking(0.0)(inputs)
        outputs = BidirectionalRNNEncoder(
            rnn_layer(
                self.cell_units,
            )
        )(masked_inputs)
        model = Model(inputs, outputs)
        model.compile('sgd', 'mean_squared_error')
        return model
Project: 3HAN | Author: ni9elf
def fhan2_max(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)


    Si = GlobalMaxPooling1D()(hij)

    wordEncoder = Model(wordInputs, Si)

    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    #sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(docInputs) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    Vb = GlobalMaxPooling1D()(hi)

    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'glorot_uniform', name="dense")(Vb)
    model = Model(inputs=[docInputs] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
Project: 3HAN | Author: ni9elf
def han2(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):

    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    #print 'in han2 max-nb-words'
    #print MAX_NB_WORDS

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)

    alpha_its, Si = AttentionLayer(name='att1')(hij)
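    # AttentionLayer is a custom attention layer defined in the 3HAN project, not a stock Keras layer.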

    #wordDrop = Dropout(DROPOUTPER, name='wordDrop')(Si)

    wordEncoder = Model(wordInputs, Si)
    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(sentenceMasking) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    alpha_s, Vb = AttentionLayer(name='att2')(hi)

    #sentDrop = Dropout(DROPOUTPER, name='sentDrop')(Vb)

    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'he_normal', name="dense")(Vb)
    model = Model(inputs=[docInputs] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

    return model, wordEncoder
Project: keras | Author: GeekLiB
def test_masking():
    layer_test(core.Masking,
               kwargs={},
               input_shape=(3, 2, 3))
Project: keras | Author: GeekLiB
def test_merge_mask_2d():
    from keras.layers import Input, merge, Masking
    from keras.models import Model

    rand = lambda *shape: np.asarray(np.random.random(shape) > 0.5, dtype='int32')

    # inputs
    input_a = Input(shape=(3,))
    input_b = Input(shape=(3,))

    # masks
    masked_a = Masking(mask_value=0)(input_a)
    masked_b = Masking(mask_value=0)(input_b)

    # three different types of merging
    merged_sum = merge([masked_a, masked_b], mode='sum')
    merged_concat = merge([masked_a, masked_b], mode='concat', concat_axis=1)
    merged_concat_mixed = merge([masked_a, input_b], mode='concat', concat_axis=1)

    # test sum
    model_sum = Model([input_a, input_b], [merged_sum])
    model_sum.compile(loss='mse', optimizer='sgd')
    model_sum.fit([rand(2, 3), rand(2, 3)], [rand(2, 3)], nb_epoch=1)

    # test concatenation
    model_concat = Model([input_a, input_b], [merged_concat])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], nb_epoch=1)

    # test concatenation with masked and non-masked inputs
    model_concat = Model([input_a, input_b], [merged_concat_mixed])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], nb_epoch=1)
Project: keras_detect_tool_wear | Author: kidozh
def build_model(timestep,input_dim,output_dim,dropout=0.5,recurrent_layers_num=4,cnn_layers_num=6,lr=0.001):
    inp = Input(shape=(timestep,input_dim))
    output = TimeDistributed(Masking(mask_value=0))(inp)
    #output = inp
    output = Conv1D(128, 1)(output)
    output = BatchNormalization()(output)
    output = Activation('relu')(output)

    output = first_block(output, (64, 128), dropout=dropout)


    output = Dropout(dropout)(output)
    for _ in range(cnn_layers_num):
        output = repeated_block(output, (64, 128), dropout=dropout)

    output = Flatten()(output)
    #output = LSTM(128, return_sequences=False)(output)

    output = BatchNormalization()(output)
    output = Activation('relu')(output)
    output = Dense(output_dim)(output)


    model = Model(inp,output)

    optimizer = Adam(lr=lr)

    model.compile(optimizer,'mse',['mae'])
    return model
Project: Fabrik | Author: Cloud-CV
def test_keras_import(self):
        model = Sequential()
        model.add(Masking(mask_value=0., input_shape=(5, 100)))
        model.build()
        self.keras_type_test(model, 0, 'Masking')


Project: Fabrik | Author: Cloud-CV
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input2'], 'l1': net['Masking']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = masking(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Masking')


Project: Fabrik | Author: Cloud-CV
def masking(layer, layer_in, layerId):
    out = {layerId: Masking(mask_value=layer['params']['mask_value'])(*layer_in)}
    return out
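
A hedged usage sketch for the masking helper above, mirroring the export test earlier on this page; the layer dict below is a hypothetical stand-in for Fabrik's net-entry format, and the mask_value of 0.0 is an assumption:

from keras.layers import Input

layer = {'params': {'mask_value': 0.0}}     # hypothetical layer spec; only 'mask_value' is read
prev = Input(shape=(5, 100))
out = masking(layer, [prev], 'l1')['l1']    # the helper returns {layerId: masked tensor}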


Project: Color-Names | Author: airalcorn2
def build_color2words_model(max_tokens, dim):
    """Build a model that learns to generate words from colors.

    :param max_tokens: maximum number of color timesteps per sequence (sequences are padded with -1).
    :param dim: dimensionality of the per-timestep word-vector output.
    :return: a compiled Keras model.
    """
    model = Sequential()
    model.add(Masking(mask_value = -1, input_shape = (max_tokens, 3)))
    model.add(LSTM(256, return_sequences = True))
    model.add(TimeDistributed(Dense(dim)))

    model.compile(loss = "mse", optimizer = "adam")
    return model
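
A hedged usage sketch for the function above: RGB sequences padded with the mask value -1, and per-timestep word-vector targets (the batch size, padding length and dim are illustrative assumptions):

import numpy as np

model = build_color2words_model(max_tokens=5, dim=300)
colors = -np.ones((8, 5, 3))                      # batch of 8, padded with -1
colors[:, :3, :] = np.random.random((8, 3, 3))    # three real RGB timesteps
word_vectors = np.random.random((8, 5, 300))      # dummy per-timestep targets
model.fit(colors, word_vectors, epochs=1)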
Project: vie-ner-lstm | Author: pth1993
def building_ner(num_lstm_layer, num_hidden_node, dropout, time_step, vector_length, output_length):
    model = Sequential()
    model.add(Masking(mask_value=0., input_shape=(time_step, vector_length)))
    for i in range(num_lstm_layer-1):
        model.add(Bidirectional(LSTM(units=num_hidden_node, return_sequences=True, dropout=dropout,
                                     recurrent_dropout=dropout)))
    model.add(Bidirectional(LSTM(units=num_hidden_node, return_sequences=True, dropout=dropout,
                                 recurrent_dropout=dropout), merge_mode='concat'))
    model.add(TimeDistributed(Dense(output_length)))
    model.add(Activation('softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
Project: keras-gp | Author: alshedivat
def assemble_rnn(params, final_reshape=True):
    """Construct an RNN/LSTM/GRU model of the form: X-[H1-H2-...-HN]-Y.
    All the H-layers are optional recurrent layers and depend on whether they
    are specified in the params dictionary.
    """
    # Input layer
    input_shape = params['input_shape']
    inputs = layers.Input(shape=input_shape)
    # inputs = layers.Input(batch_shape=[20] + list(input_shape))

    # Masking layer
    previous = layers.Masking(mask_value=0.0)(inputs)

    # Hidden layers
    for layer in params['hidden_layers']:
        Layer = layers.deserialize(
            {'class_name': layer['name'], 'config': layer['config']})
        previous = Layer(previous)
        if 'dropout' in layer and layer['dropout'] is not None:
            previous = layers.Dropout(layer['dropout'])(previous)
        if 'batch_norm' in layer and layer['batch_norm'] is not None:
            previous = layers.BatchNormalization(**layer['batch_norm'])(previous)

    # Output layer
    output_shape = params['output_shape']
    output_dim = np.prod(output_shape)
    outputs = layers.Dense(output_dim)(previous)

    if final_reshape:
        outputs = layers.Reshape(output_shape)(outputs)

    return KerasModel(inputs=inputs, outputs=outputs)
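
A hedged example of the params dictionary that assemble_rnn expects; the field names come from the function body above, while the concrete values are assumptions:

params = {
    'input_shape': (10, 3),
    'hidden_layers': [
        {'name': 'LSTM',
         'config': {'units': 32, 'return_sequences': False},
         'dropout': 0.25,
         'batch_norm': None},
    ],
    'output_shape': (1,),
}
model = assemble_rnn(params)   # X -> Masking -> LSTM -> Dropout -> Dense -> Reshape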
Project: keras-customized | Author: ambrite
def test_masking():
    layer_test(core.Masking,
               kwargs={},
               input_shape=(3, 2, 3))
Project: keras-customized | Author: ambrite
def test_merge_mask_2d():
    from keras.layers import Input, merge, Masking
    from keras.models import Model

    rand = lambda *shape: np.asarray(np.random.random(shape) > 0.5, dtype='int32')

    # inputs
    input_a = Input(shape=(3,))
    input_b = Input(shape=(3,))

    # masks
    masked_a = Masking(mask_value=0)(input_a)
    masked_b = Masking(mask_value=0)(input_b)

    # three different types of merging
    merged_sum = merge([masked_a, masked_b], mode='sum')
    merged_concat = merge([masked_a, masked_b], mode='concat', concat_axis=1)
    merged_concat_mixed = merge([masked_a, input_b], mode='concat', concat_axis=1)

    # test sum
    model_sum = Model([input_a, input_b], [merged_sum])
    model_sum.compile(loss='mse', optimizer='sgd')
    model_sum.fit([rand(2, 3), rand(2, 3)], [rand(2, 3)], nb_epoch=1)

    # test concatenation
    model_concat = Model([input_a, input_b], [merged_concat])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], nb_epoch=1)

    # test concatenation with masked and non-masked inputs
    model_concat = Model([input_a, input_b], [merged_concat_mixed])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], nb_epoch=1)
Project: academic | Author: xinchrome
def train(self,sequences,labels,features,publish_years,pids,superparams,cut=None,predict_year=2000,max_iter=0,max_outer_iter=200):
        from keras.layers import Input, Dense, Masking, LSTM, Activation, Dropout, merge
        from keras.models import Model
        from keras.regularizers import l1,l2

        f = Input(shape=(1,len(features[0])),dtype='float')
        # features[paper][feature], sequences[paper][day][feature]
        k = Dense(len(sequences[0][0]),activation='relu',W_regularizer=l1(self.l1))(f)
        # k = merge([k,k],mode='concat',concat_axis=1)
        k1 = Dropout(0.5)(k)

        k2 = Input(shape=(len(sequences[0]),len(sequences[0][0])),dtype='float')

        g1 = merge([k1,k2],mode='concat',concat_axis=1)

        m1 = Masking(mask_value= -1.)(g1)

        n1 = LSTM(128,W_regularizer=l2(self.l2),dropout_W=0.5,dropout_U=0.5)(m1)
        n1 = Dense(2,activation='softmax')(n1)

        model = Model(inputs=[f,k2], outputs=[n1])
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['categorical_accuracy'])
        model.fit(
            [numpy.array([[f] for f in features]),numpy.array(sequences)], [numpy.array(labels)],
            epochs=500, batch_size=1,
            verbose=1,validation_split=0.2)
        self.params['model'] = model
        # embeddings = model.layers[1].W.get_value()
        return model
Project: keras | Author: NVIDIA
def test_masking():
    layer_test(core.Masking,
               kwargs={},
               input_shape=(3, 2, 3))
Project: keras | Author: NVIDIA
def test_merge_mask_2d():
    from keras.layers import Input, merge, Masking
    from keras.models import Model

    rand = lambda *shape: np.asarray(np.random.random(shape) > 0.5, dtype='int32')

    # inputs
    input_a = Input(shape=(3,))
    input_b = Input(shape=(3,))

    # masks
    masked_a = Masking(mask_value=0)(input_a)
    masked_b = Masking(mask_value=0)(input_b)

    # three different types of merging
    merged_sum = merge([masked_a, masked_b], mode='sum')
    merged_concat = merge([masked_a, masked_b], mode='concat', concat_axis=1)
    merged_concat_mixed = merge([masked_a, input_b], mode='concat', concat_axis=1)

    # test sum
    model_sum = Model([input_a, input_b], [merged_sum])
    model_sum.compile(loss='mse', optimizer='sgd')
    model_sum.fit([rand(2, 3), rand(2, 3)], [rand(2, 3)], nb_epoch=1)

    # test concatenation
    model_concat = Model([input_a, input_b], [merged_concat])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], nb_epoch=1)

    # test concatenation with masked and non-masked inputs
    model_concat = Model([input_a, input_b], [merged_concat_mixed])
    model_concat.compile(loss='mse', optimizer='sgd')
    model_concat.fit([rand(2, 3), rand(2, 3)], [rand(2, 6)], nb_epoch=1)
Project: 3HAN | Author: ni9elf
def fhan3_avg(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):

    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)

    #alpha_its, Si = AttentionLayer(name='att1')(hij)
    wordDrop = Dropout(DROPOUTPER, name='wordDrop')(hij)

    word_pool = GlobalAveragePooling1D()(wordDrop)      

    wordEncoder = Model(wordInputs, word_pool)

    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    #sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(docInputs) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    #alpha_s, Vb = AttentionLayer(name='att2')(hi)
    sentDrop = Dropout(DROPOUTPER, name='sentDrop')(hi)

    sent_pool = GlobalAveragePooling1D()(sentDrop)         

    Vb = Reshape((1, sent_pool._keras_shape[1]))(sent_pool)

    #-----------------------------------------------------------------------------------------------

    headlineInput = Input(shape=(MAX_WORDS,), name='headlineInput',dtype='float32')

    headlineEmb = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, mask_zero=False, name='headlineEmb')(headlineInput)

    #Vb = Masking(mask_value=0.0, name='Vb')(Vb)        
    headlineBodyEmb = concatenate([headlineEmb, Vb], axis=1, name='headlineBodyEmb')

    h3 = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru3')(headlineBodyEmb)

    #a3, Vn = AttentionLayer(name='att3')(h3)

    headDrop = Dropout(DROPOUTPER, name='3Drop')(h3)

    head_pool = GlobalAveragePooling1D()(headDrop) 

    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'he_normal', name="dense")(head_pool)
    model = Model(inputs=[docInputs, headlineInput] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
Project: 3HAN | Author: ni9elf
def fhan3_max(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):

    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)

    #alpha_its, Si = AttentionLayer(name='att1')(hij)
    wordDrop = Dropout(DROPOUTPER, name='wordDrop')(hij)

    word_max = GlobalMaxPooling1D()(wordDrop)      

    wordEncoder = Model(wordInputs, word_max)

    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    #sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(docInputs) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    #alpha_s, Vb = AttentionLayer(name='att2')(hi)
    sentDrop = Dropout(DROPOUTPER, name='sentDrop')(hi)

    sent_max = GlobalMaxPooling1D()(sentDrop)         

    Vb = Reshape((1, sent_max._keras_shape[1]))(sent_max)

    #-----------------------------------------------------------------------------------------------

    headlineInput = Input(shape=(MAX_WORDS,), name='headlineInput',dtype='float32')

    headlineEmb = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, mask_zero=False, name='headlineEmb')(headlineInput)

    #Vb = Masking(mask_value=0.0, name='Vb')(Vb)        
    headlineBodyEmb = concatenate([headlineEmb, Vb], axis=1, name='headlineBodyEmb')

    h3 = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru3')(headlineBodyEmb)

    #a3, Vn = AttentionLayer(name='att3')(h3)

    headDrop = Dropout(DROPOUTPER, name='3Drop')(h3)

    head_max = GlobalMaxPooling1D()(headDrop) 

    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'he_normal', name="dense")(head_max)
    model = Model(inputs=[docInputs, headlineInput] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
Project: 3HAN | Author: ni9elf
def fhan3_pretrain(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_WORDS,), name='word1', dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM,  weights=[embedding_matrix], mask_zero=True, trainable=True, name='emb1')(wordInputs) #Assuming all the sentences have same number of words. Check for input_length again.

    hij = Bidirectional(GRU(WORDGRU, name='gru1', return_sequences=True))(wordEmbedding)

    wordDrop = Dropout(DROPOUTPER, name='drop1')(hij)

    alpha_its, Si  = AttentionLayer(name='att1')(wordDrop)  

    wordEncoder = Model(wordInputs, Si)

    wordEncoder.load_weights('han1_pretrain.h5', by_name=True)

    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(sentenceMasking) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)   

    sentDrop = Dropout(DROPOUTPER, name='sentDrop')(hi)

    alpha_s, Vb = AttentionLayer(name='att2')(sentDrop)

    Vb = Reshape((1, Vb._keras_shape[1]))(Vb)

    #-----------------------------------------------------------------------------------------------

    headlineInput = Input(shape=(MAX_WORDS,), name='headlineInput',dtype='float32')

    headlineEmb = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, mask_zero=True, name='headlineEmb')(headlineInput)

    Vb = Masking(mask_value=0.0, name='Vb')(Vb)     
    headlineBodyEmb = concatenate([headlineEmb, Vb], axis=1, name='headlineBodyEmb')

    h3 = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru3')(headlineBodyEmb)    

    h3Drop =  Dropout(DROPOUTPER, name='h3drop')(h3)

    a3, Vn = AttentionLayer(name='att3')(h3Drop)

    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'he_normal', name="dense")(Vn)
    model = Model(inputs=[docInputs, headlineInput] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
Project: 3HAN | Author: ni9elf
def HAN(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):

    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)

    wordDrop = Dropout(DROPOUTPER, name='wordDrop')(hij)

    alpha_its, Si = AttentionLayer(name='att1')(wordDrop)


    wordEncoder = Model(wordInputs, Si)
    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(sentenceMasking) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    sentDrop = Dropout(DROPOUTPER, name='sentDrop')(hi)

    alpha_s, Vb = AttentionLayer(name='att2')(sentDrop)

    Vb = Reshape((1, Vb._keras_shape[1]))(Vb)

    #-----------------------------------------------------------------------------------------------

    headlineInput = Input(shape=(MAX_WORDS,), name='headlineInput',dtype='float32')

    headlineEmb = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, mask_zero=True, name='headlineEmb')(headlineInput)

    Vb = Masking(mask_value=0.0, name='Vb')(Vb)     

    headlineBodyEmb = concatenate([headlineEmb, Vb], axis=1, name='headlineBodyEmb')

    h3 = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru3')(headlineBodyEmb)

    headDrop = Dropout(DROPOUTPER, name='3Drop')(h3)

    a3, Vn = AttentionLayer(name='att3')(headDrop)


    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'he_normal', name="dense")(Vn)
    model = Model(inputs=[docInputs, headlineInput] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
Project: luvina | Author: oarriaga
def SiameseLSTM(max_token_length, hidden_size, embedding_size=300):
    text_input_1 = Input(shape=(max_token_length, embedding_size),
                         name='text_1')
    text_mask_1 = Masking(mask_value=0.0, name='text_mask_1')(text_input_1)
    # text_dropout_1 = Dropout(.5, name='text_dropout_1')(text_mask_1)

    text_input_2 = Input(shape=(max_token_length, embedding_size),
                         name='text_2')
    text_mask_2 = Masking(mask_value=0.0, name='text_mask_2')(text_input_2)
    # text_dropout_2 = Dropout(.5, name='text_dropout_2')(text_mask_2)

    lstm_1_a = Bidirectional(GRU(units=hidden_size,
                                 return_sequences=True,
                                 name='RNN_1_a'))(text_mask_1)

    lstm_1_b = Bidirectional(GRU(units=hidden_size,
                                 return_sequences=False,
                                 name='RNN_1_b'))(lstm_1_a)

    """
    lstm_1_c = Bidirectional(GRU(units=hidden_size,
                                 return_sequences=False,
                                 name='RNN_1_c'))(lstm_1_b)
    """

    lstm_2_a = Bidirectional(GRU(units=hidden_size,
                                 return_sequences=True,
                                 name='RNN_2_a'))(text_mask_2)

    lstm_2_b = Bidirectional(GRU(units=hidden_size,
                                 return_sequences=False,
                                 name='RNN_2_b'))(lstm_2_a)

    """
    lstm_2_c = Bidirectional(GRU(units=hidden_size,
                                 return_sequences=False,
                                 name='RNN_2_c'))(lstm_2_b)
    """

    cosine_similarity = Dot(axes=1, normalize=True,
                            name='cosine_similarity')([lstm_1_b, lstm_2_b])

    model = Model(inputs=[text_input_1, text_input_2],
                  outputs=cosine_similarity)

    return model