Python keras.layers.pooling module, MaxPooling1D() example source code

The following 3 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.pooling.MaxPooling1D().

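Before the project examples, a minimal standalone sketch of what the layer does (the shapes and values here are illustrative, not taken from any project below): MaxPooling1D slides a window along the timestep axis and keeps the per-channel maximum, so pool_size=2 with strides=2 halves the sequence length.

import numpy as np
from keras.models import Sequential
from keras.layers.pooling import MaxPooling1D

# Pooling-only model: input is (batch, 6 timesteps, 3 channels).
demo = Sequential()
demo.add(MaxPooling1D(pool_size=2, strides=2, input_shape=(6, 3)))

x = np.arange(18, dtype='float32').reshape(1, 6, 3)
print(demo.predict(x).shape)  # (1, 3, 3): 6 timesteps pooled down to 3
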
Project: VDCNN    Author: yuhsinliu1993    | project source | file source
import tensorflow as tf
from keras.models import Model
from keras.layers import Input, Embedding, Conv1D, Dense, Dropout, Lambda
from keras.layers.pooling import MaxPooling1D
from keras.optimizers import SGD

# ConvBlockLayer and get_conv_shape are defined elsewhere in the VDCNN project.

def build_model(num_filters, num_classes, sequence_max_length=512, num_quantized_chars=71, embedding_size=16, learning_rate=0.001, top_k=3, model_path=None):

    inputs = Input(shape=(sequence_max_length, ), dtype='int32', name='inputs')

    embedded_sent = Embedding(num_quantized_chars, embedding_size, input_length=sequence_max_length)(inputs)

    # First conv layer
    conv = Conv1D(filters=64, kernel_size=3, strides=2, padding="same")(embedded_sent)

    # Each conv block is followed by one MaxPooling1D layer
    for i in range(len(num_filters)):
        conv = ConvBlockLayer(get_conv_shape(conv), num_filters[i])(conv)
        conv = MaxPooling1D(pool_size=3, strides=2, padding="same")(conv)

    # k-max pooling (Finds values and indices of the k largest entries for the last dimension)
    def _top_k(x):
        x = tf.transpose(x, [0, 2, 1])
        k_max = tf.nn.top_k(x, k=top_k)
        return tf.reshape(k_max[0], (-1, num_filters[-1] * top_k))
    k_max = Lambda(_top_k, output_shape=(num_filters[-1] * top_k,))(conv)

    # Three fully-connected layers with dropout regularization
    fc1 = Dropout(0.2)(Dense(512, activation='relu', kernel_initializer='he_normal')(k_max))
    fc2 = Dropout(0.2)(Dense(512, activation='relu', kernel_initializer='he_normal')(fc1))
    fc3 = Dense(num_classes, activation='softmax')(fc2)

    # define optimizer
    sgd = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=False)
    model = Model(inputs=inputs, outputs=fc3)
    model.compile(optimizer=sgd, loss='mean_squared_error', metrics=['accuracy'])

    if model_path is not None:
        model.load_weights(model_path)

    return model
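
A hypothetical call, to make the signature concrete: num_filters must be a list with one entry per conv block, since the loop pairs one ConvBlockLayer with one MaxPooling1D per entry. The values below follow the four-block pattern common in VDCNN implementations; they are an assumption, not taken from the repo.

# Hypothetical usage -- four blocks of 64/128/256/512 filters.
vdcnn = build_model(num_filters=[64, 128, 256, 512], num_classes=2)
vdcnn.summary()
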
Project: Book_DeepLearning_Practice    Author: wac81    | project source | file source
from keras.models import Sequential
from keras.layers import Embedding, Dense, Activation, Flatten, Bidirectional, TimeDistributed, LSTM
from keras.layers.pooling import MaxPooling1D
from keras.utils.visualize_util import plot  # Keras 1.x API; Keras 2 renamed this to plot_model

# max_features (vocabulary size) and max_seq (input length) are module-level
# settings defined elsewhere in the project.

def text_feature_extract_model1(embedding_size=128, hidden_size=256):
    '''
    A model that uses a plain Bi-LSTM plus max-pooling to extract text features.

    examples: sample sentences paired with the model's sigmoid score (the original
    Chinese text was lost to encoding; the scores clustered near 1.6e-05 for one
    class and 1.0 for the other)

    :return:
    '''
    model = Sequential()
    model.add(Embedding(input_dim=max_features,
                        output_dim=embedding_size,
                        input_length=max_seq))
    model.add(Bidirectional(LSTM(hidden_size, return_sequences=True)))
    model.add(TimeDistributed(Dense(embedding_size // 2)))  # floor division keeps the unit count an int
    model.add(Activation('softplus'))
    model.add(MaxPooling1D(5))
    model.add(Flatten())
    # model.add(Dense(2048, activation='softplus'))
    # model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    plot(model, to_file="text_feature_extract_model1.png", show_shapes=True)
    return model
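
The function reads max_features and max_seq from module scope, so a caller has to define them before calling it. A hypothetical setup; the values below are placeholders, not the book's:

# Hypothetical module-level settings the function expects to find.
max_features = 20000  # vocabulary size for the Embedding layer
max_seq = 100         # padded input length, in tokens

clf = text_feature_extract_model1()
# clf.fit(X, y, ...) with X of shape (samples, max_seq) holding integer token
# ids and y a (samples, 1) array of 0/1 labels for the sigmoid output.
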
Project: gtzan.keras    Author: Hguimaraes    | project source | file source
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Activation, BatchNormalization, Conv1D, concatenate
from keras.layers.pooling import MaxPooling1D, GlobalMaxPooling1D, GlobalAveragePooling1D

def cnn_melspect_1D(input_shape):
    kernel_size = 3
    #activation_func = LeakyReLU()
    activation_func = Activation('relu')
    inputs = Input(input_shape)

    # Convolutional block_1
    conv1 = Conv1D(32, kernel_size)(inputs)
    act1 = activation_func(conv1)
    bn1 = BatchNormalization()(act1)
    pool1 = MaxPooling1D(pool_size=2, strides=2)(bn1)

    # Convolutional block_2
    conv2 = Conv1D(64, kernel_size)(pool1)
    act2 = activation_func(conv2)
    bn2 = BatchNormalization()(act2)
    pool2 = MaxPooling1D(pool_size=2, strides=2)(bn2)

    # Convolutional block_3
    conv3 = Conv1D(128, kernel_size)(pool2)
    act3 = activation_func(conv3)
    bn3 = BatchNormalization()(act3)

    # Global Layers
    gmaxpl = GlobalMaxPooling1D()(bn3)
    gmeanpl = GlobalAveragePooling1D()(bn3)
    mergedlayer = concatenate([gmaxpl, gmeanpl], axis=1)

    # Regular MLP
    dense1 = Dense(512,
        kernel_initializer='glorot_normal',
        bias_initializer='glorot_normal')(mergedlayer)
    actmlp = activation_func(dense1)
    reg = Dropout(0.5)(actmlp)

    dense2 = Dense(512,
        kernel_initializer='glorot_normal',
        bias_initializer='glorot_normal')(reg)
    actmlp = activation_func(dense2)
    reg = Dropout(0.5)(actmlp)

    output = Dense(10, activation='softmax')(reg)  # 10 GTZAN genre classes

    model = Model(inputs=[inputs], outputs=[output])
    return model
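
A hypothetical call: the model expects (timesteps, n_mels) mel-spectrogram clips, and since the function returns an uncompiled model, the caller compiles it. The shape below is an illustrative assumption, not taken from the repo.

# Hypothetical input shape: 1290 STFT frames x 128 mel bands per clip.
gtzan = cnn_melspect_1D((1290, 128))
gtzan.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
gtzan.summary()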