Python keras.layers module: GlobalMaxPooling1D() example source code

We have extracted the following 14 code examples from open-source Python projects to illustrate how to use keras.layers.GlobalMaxPooling1D().

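Before the project examples, here is a minimal, self-contained sketch (shapes chosen arbitrarily for illustration) of what the layer computes: GlobalMaxPooling1D collapses the time axis of a (batch, steps, features) tensor by taking a feature-wise maximum over the steps.

import numpy as np
from keras.models import Sequential
from keras.layers import GlobalMaxPooling1D

model = Sequential()
model.add(GlobalMaxPooling1D(input_shape=(10, 4)))  # 10 timesteps, 4 features per step

x = np.random.rand(2, 10, 4)           # a batch of 2 sequences
y = model.predict(x)                   # shape: (2, 4)
assert np.allclose(y, x.max(axis=1))   # identical to a feature-wise max over time
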
Project: semeval2017-scienceie    Author: UKPLab    | Project source | File source
def build_cnn(input_shape, output_dim,nb_filter):
    clf = Sequential()
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1,
                          input_shape=input_shape))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))

    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf

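An editor's note: this snippet, like most on this page, uses Keras 1.x argument names (nb_filter, filter_length, border_mode, subsample_length). Below is a sketch of the same model under the Keras 2 API, for readers on newer versions; this function is not part of the project source.

from keras.models import Sequential
from keras.layers import Activation, Conv1D, Dense, Dropout, GlobalMaxPooling1D

def build_cnn_keras2(input_shape, output_dim, nb_filter):
    # Keras 2 renames: nb_filter -> filters, filter_length -> kernel_size,
    # border_mode -> padding, subsample_length -> strides.
    clf = Sequential()
    clf.add(Conv1D(filters=nb_filter, kernel_size=4, padding='valid',
                   activation='relu', strides=1, input_shape=input_shape))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation('tanh'))
    clf.add(Dense(output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad', loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
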
# just one filter
Project: semeval2017-scienceie    Author: UKPLab    | Project source | File source
def build_cnn_char(input_dim, output_dim,nb_filter):
    clf = Sequential()
    clf.add(Embedding(input_dim,
                      32,                   # character embedding size
                      input_length=maxlen,  # maxlen is a module-level constant in the original file
                      dropout=0.2))
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=3,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))

    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf

# just one filter
Project: keras-text    Author: raghakot    | Project source | File source
def build_model(self, x):
        pooled_tensors = []
        for filter_size in self.filter_sizes:
            x_i = Conv1D(self.num_filters, filter_size, activation='elu', **self.conv_kwargs)(x)
            x_i = GlobalMaxPooling1D()(x_i)
            pooled_tensors.append(x_i)

        x = pooled_tensors[0] if len(self.filter_sizes) == 1 else concatenate(pooled_tensors, axis=-1)
        return x
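
The same multi-width pattern as a self-contained sketch, with hypothetical vocabulary, sequence, and filter sizes that do not come from keras-text: one Conv1D + GlobalMaxPooling1D branch per filter width, concatenated into a single fixed-length vector.

from keras.models import Model
from keras.layers import Input, Embedding, Conv1D, GlobalMaxPooling1D, Dense, concatenate

inp = Input(shape=(100,), dtype='int32')             # 100-token sequences (hypothetical)
emb = Embedding(input_dim=5000, output_dim=64)(inp)  # (batch, 100, 64)
pooled = [GlobalMaxPooling1D()(Conv1D(32, k, activation='relu')(emb))
          for k in (3, 4, 5)]                        # one pooled vector per filter width
merged = concatenate(pooled)                         # (batch, 96)
out = Dense(2, activation='softmax')(merged)
model = Model(inputs=inp, outputs=out)
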
Project: Fabrik    Author: Cloud-CV    | Project source | File source
def test_keras_import(self):
        # Global Pooling 1D
        model = Sequential()
        model.add(GlobalMaxPooling1D(input_shape=(1, 16)))
        model.build()
        self.keras_param_test(model, 0, 5)
        # Global Pooling 2D
        model = Sequential()
        model.add(GlobalMaxPooling2D(input_shape=(1, 16, 16)))
        model.build()
        self.keras_param_test(model, 0, 8)
        # Pooling 1D
        model = Sequential()
        model.add(MaxPooling1D(pool_size=2, strides=2, padding='same', input_shape=(1, 16)))
        model.build()
        self.keras_param_test(model, 0, 5)
        # Pooling 2D
        model = Sequential()
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', input_shape=(1, 16, 16)))
        model.build()
        self.keras_param_test(model, 0, 8)
        # Pooling 3D
        model = Sequential()
        model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same',
                               input_shape=(1, 16, 16, 16)))
        model.build()
        self.keras_param_test(model, 0, 11)


# ********** Locally-connected Layers **********
Project: Color-Names    Author: airalcorn2    | Project source | File source
def build_words2color_model(max_tokens, dim):
    """Build a model that learns to generate colors from words.

    :param max_tokens:
    :param dim:
    :return:
    """
    model = Sequential()
    model.add(Conv1D(128, 1, input_shape=(max_tokens, dim), activation="tanh"))
    model.add(GlobalMaxPooling1D())
    model.add(Dropout(0.5))
    model.add(Dense(3))

    model.compile(loss="mse", optimizer="sgd")
    return model
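
Because the convolution above uses a window of one token, it acts as a per-token projection of the word vectors into 128 features; GlobalMaxPooling1D then keeps each feature's strongest response across the tokens, so the three-unit color output does not depend on word order.
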
Project: coremltools    Author: apple    | Project source | File source
def test_global_max_pooling_1d(self):
        np.random.seed(1988)
        input_dim = 2
        input_length = 10
        filter_length = 3
        nb_filters = 4
        model = Sequential()
        model.add(Conv1D(nb_filters, kernel_size=filter_length, padding='same',
                         input_shape=(input_length, input_dim)))
        model.add(GlobalMaxPooling1D())
        self._test_keras_model(model)
Project: Neural-Headline-Generator-CN    Author: QuantumLiu    | Project source | File source
def c2r(dic_len,input_length,output_length,emb_dim=128,hidden=512,nb_filter=64,deepth=(1,1),stride=3):
    model = Sequential()
    model.add(Embedding(input_dim=dic_len, output_dim=emb_dim, input_length=input_length))
    for l in range(deepth[0]):
        model.add(Conv1D(nb_filter,3,activation='relu'))
    model.add(GlobalMaxPooling1D())
    model.add(Dropout(0.5))
    model.add(RepeatVector(output_length))
    for l in range(deepth[1]):  # decoder depth (the original reused deepth[0] here, apparently a typo)
        model.add(LSTM(hidden, return_sequences=True))
    model.add(TimeDistributed(Dense(units=dic_len, activation='softmax')))
    model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['acc'])
    return model
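
In this encoder-decoder setup, GlobalMaxPooling1D collapses the convolutional encoder's variable-length output into a single fixed-length vector, which RepeatVector then copies output_length times to feed the LSTM decoder at every output step.
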
Project: baseline    Author: dpressel    | Project source | File source
def create(w2v, labels, **kwargs):
        model = ConvModel()
        model.labels = labels
        model.vocab = w2v.vocab
        filtsz = kwargs['filtsz']
        pdrop = kwargs.get('dropout', 0.5)
        mxlen = int(kwargs.get('mxlen', 100))
        cmotsz = kwargs['cmotsz']
        finetune = bool(kwargs.get('finetune', True))
        nc = len(labels)
        x = Input(shape=(mxlen,), dtype='int32', name='input')

        vocab_size = w2v.weights.shape[0]
        embedding_dim = w2v.dsz

        lut = Embedding(input_dim=vocab_size, output_dim=embedding_dim, weights=[w2v.weights], input_length=mxlen, trainable=finetune)

        embed = lut(x)

        mots = []
        for i, fsz in enumerate(filtsz):
            conv = Conv1D(cmotsz, fsz, activation='relu')(embed)
            gmp = GlobalMaxPooling1D()(conv)
            mots.append(gmp)

        joined = merge(mots, mode='concat')
        cmotsz_all = cmotsz * len(filtsz)
        drop1 = Dropout(pdrop)(joined)

        input_dim = cmotsz_all
        last_layer = drop1
        dense = Dense(output_dim=nc, input_dim=input_dim, activation='softmax')(last_layer)
        model.impl = keras.models.Model(input=[x], output=[dense])
        return model
Project: text_classification    Author: senochow    | Project source | File source
def LSTMLayer(embed_matrix, embed_input, sequence_length, dropout_prob, hidden_dims, embedding_dim=300, lstm_dim=100):
    model = Sequential()
    model.add(Embedding(embed_input, embedding_dim, input_length=sequence_length, weights=[embed_matrix]))
    model.add(Bidirectional(MGU(lstm_dim, return_sequences=True)))
    #model.add(AttentionLayer(lstm_dim))
    model.add(GlobalMaxPooling1D())
    # 3. Hidden Layer
    model.add(Dense(hidden_dims))
    model.add(Dropout(dropout_prob[1]))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='RMSprop', metrics=['accuracy'])
    return model
Project: text_classification    Author: senochow    | Project source | File source
def KeywordLayer(sequence_length, embed_input, embedding_dim, embed_matrix):
    model = Sequential()
    model.add(Embedding(embed_input, embedding_dim, input_length=sequence_length, weights=[embed_matrix]))
    model.add(GlobalMaxPooling1D())
    return model
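
This is the smallest use on the page: with no convolution at all, GlobalMaxPooling1D simply takes a feature-wise max over the word embeddings, turning a variable-length token sequence into one vector of size embedding_dim.
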
Project: text_classification    Author: senochow    | Project source | File source
def HierarchicalRNN(embed_matrix, max_words, ans_cnt, sequence_length, embedding_dim, lstm_dim=100):
    ''' Hierarchical RNN model
        Input: (batch_size, answers, answer words)
    Args:
        embed_matrix: word embedding
        max_words:    word dict size of embedding layer
        ans_cnt:      answer count
        sequence_length: answer words count
        embedding_dim: embedding dimension
        lstm_dim:     LSTM hidden size
    '''
    hnn = Sequential()
    x = Input(shape=(ans_cnt, sequence_length))
    # 1. time distributed word embedding: (None, steps, words, embed_dim)
    words_embed = TimeDistributed(Embedding(max_words, embedding_dim,input_length=sequence_length,weights=[embed_matrix]))(x)
    # 2. word level lstm embedding: --> (None, steps/sentence_num, hidden/sent_words, hidden_dim)
    word_lstm = TimeDistributed(Bidirectional(MGU(lstm_dim, return_sequences=True)))(words_embed)

    # 3. max pooling over words: --> (None, steps, dim)
    word_avg = TimeDistributed(GlobalMaxPooling1D())(word_lstm)
    #word_avg = TimeDistributed(AttentionLayer(lstm_dim*2))(word_lstm)

    # 4.  sentence lstm:  --> (None, hidden, hidden_dim)
    sent_lstm = Bidirectional(MGU(lstm_dim, return_sequences=True))(word_avg)

    # 5. max pooling over answers:  --> (None, hidden_dim)
    sent_avg = GlobalMaxPooling1D()(sent_lstm)
    #sent_avg = AttentionLayer(lstm_dim*2)(sent_lstm)
    model = Model(input=x, output=sent_avg)
    hnn.add(model)
    return hnn
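
Note the two levels of pooling: wrapped in TimeDistributed (step 3), GlobalMaxPooling1D pools over the words inside each answer independently; applied directly (step 5), it pools over the answer sequence itself.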


Project: DeepLearn    Author: GauravBh1010tt    | Project source | File source
def trainCNN(obj, dataset_headLines, dataset_body):
    embedding_dim = 300
    LSTM_neurons = 50
    dense_neuron = 16
    dimx = 100
    dimy = 200
    lamda = 0.0
    nb_filter = 100
    filter_length = 4
    vocab_size = 10000
    batch_size = 50
    epochs = 5
    ntn_out = 16
    ntn_in = nb_filter 
    state = False


    train_head,train_body,embedding_matrix = obj.process_data(sent_Q=dataset_headLines,
                                                     sent_A=dataset_body,dimx=dimx,dimy=dimy,
                                                     wordVec_model = wordVec_model)    
    inpx = Input(shape=(dimx,),dtype='int32',name='inpx')
    #x = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimx)(inpx)
    x = word2vec_embedding_layer(embedding_matrix)(inpx)  
    inpy = Input(shape=(dimy,),dtype='int32',name='inpy')
    #y = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimy)(inpy)
    y = word2vec_embedding_layer(embedding_matrix)(inpy)
    ques = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                         border_mode='valid', activation='relu',
                         subsample_length=1)(x)

    ans = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                        border_mode='valid', activation='relu',
                        subsample_length=1)(y)

    #hx = Lambda(max_1d, output_shape=(nb_filter,))(ques)
    #hy = Lambda(max_1d, output_shape=(nb_filter,))(ans)
    hx = GlobalMaxPooling1D()(ques)
    hy = GlobalMaxPooling1D()(ans)
    #wordVec_model = []
    #h =  Merge(mode="concat",name='h')([hx,hy])

    h1 = Multiply()([hx,hy])
    h2 = Abs()([hx,hy])

    h =  Merge(mode="concat",name='h')([h1,h2])
    #h = NeuralTensorLayer(output_dim=1,input_dim=ntn_in)([hx,hy])
    #h = ntn_layer(ntn_in,ntn_out,activation=None)([hx,hy])
    #score = h
    wrap = Dense(dense_neuron, activation='relu',name='wrap')(h)
    #score = Dense(1,activation='sigmoid',name='score')(h)
    #wrap = Dense(dense_neuron,activation='relu',name='wrap')(h)
    score = Dense(4,activation='softmax',name='score')(wrap)

    #score=K.clip(score,1e-7,1.0-1e-7)
    #corr = CorrelationRegularization(-lamda)([hx,hy])
    #model = Model( [inpx,inpy],[score,corr])
    model = Model([inpx, inpy], score)
    model.compile(loss='categorical_crossentropy', optimizer="adadelta", metrics=['accuracy'])
    return model,train_head,train_body
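
The pooled headline and body vectors are combined through two common sentence-pair features: their element-wise product (Multiply) and what appears to be an element-wise absolute difference (Abs is a project-defined layer whose source is not shown here), before the softmax scoring head.
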
Project: tartarus    Author: sergiooramas    | Project source | File source
def get_model_4(params):
    embedding_weights = pickle.load(open(common.TRAINDATA_DIR+"/embedding_weights_w2v_%s.pk" % params['embeddings_suffix'],"rb"))
    graph_in = Input(shape=(params['sequence_length'], params['embedding_dim']))
    convs = []
    for fsz in params['filter_sizes']:
        conv = Convolution1D(nb_filter=params['num_filters'],
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1)
        x = conv(graph_in)
        logging.debug("Filter size: %s" % fsz)
        logging.debug("Output CNN: %s" % str(conv.output_shape))

        pool = GlobalMaxPooling1D()
        x = pool(x)
        logging.debug("Output Pooling: %s" % str(pool.output_shape))
        convs.append(x)

    if len(params['filter_sizes'])>1:
        merge = Merge(mode='concat')
        out = merge(convs)
        logging.debug("Merge: %s" % str(merge.output_shape))
    else:
        out = convs[0]

    graph = Model(input=graph_in, output=out)

    # main sequential model
    model = Sequential()
    if params['model_variation'] != 'CNN-static':
        model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'], input_length=params['sequence_length'],
                            weights=embedding_weights))
    model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim'])))
    model.add(graph)
    model.add(Dense(params['n_dense']))
    model.add(Dropout(params['dropout_prob'][1]))
    model.add(Activation('relu'))

    model.add(Dense(output_dim=params["n_out"], init="uniform"))
    model.add(Activation(params['final_activation']))
    logging.debug("Output CNN: %s" % str(model.output_shape))

    if params['final_activation'] == 'linear':
        model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))

    return model

# word2vec ARCH with LSTM
Project: semeval2017-scienceie    Author: UKPLab    | Project source | File source
def build_cnn_char_threeModels(input_dim, output_dim,nb_filter,filter_size=3):
    left = Sequential()
    left.add(Embedding(input_dim,
             32,              # character embedding size
             input_length=L,  # L, M and R are module-level window lengths in the original file
             dropout=0.2))
    left.add(Convolution1D(nb_filter=nb_filter,
                           filter_length=filter_size,
                           border_mode="valid",
                           activation="relu",
                           subsample_length=1))
    left.add(GlobalMaxPooling1D())
    left.add(Dense(100))
    left.add(Dropout(0.2))
    left.add(Activation("tanh"))

    center = Sequential()
    center.add(Embedding(input_dim,
             32, # character embedding size
             input_length=M,
             dropout=0.2))
    center.add(Convolution1D(nb_filter=nb_filter,
                             filter_length=filter_size,
                             border_mode="valid",
                             activation="relu",
                             subsample_length=1))
    center.add(GlobalMaxPooling1D())
    center.add(Dense(100))
    center.add(Dropout(0.2))
    center.add(Activation("tanh"))

    right = Sequential()
    right.add(Embedding(input_dim,
             32, # character embedding size
             input_length=R,
             dropout=0.2))
    right.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_size,
                            border_mode="valid",
                            activation="relu",
                            subsample_length=1))
    right.add(GlobalMaxPooling1D())
    right.add(Dense(100))
    right.add(Dropout(0.2))
    right.add(Activation("tanh"))

    clf = Sequential()
    clf.add(Merge([left,center,right],mode="concat"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))

    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf