Python keras.layers module: MaxPool2D() example source code

The following 7 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.MaxPool2D().
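
For reference, here is a minimal standalone sketch (not taken from any of the projects below) showing the MaxPool2D arguments that recur throughout these examples: pool_size, strides, and padding.

from keras.layers import Conv2D, Input, MaxPool2D
from keras.models import Model

# minimal sketch: one convolution followed by max pooling
inputs = Input(shape=(28, 28, 1))
x = Conv2D(8, (3, 3), padding='same', name='conv')(inputs)
# pool_size=2 halves the spatial dimensions; strides defaults to pool_size,
# and padding='valid' (the default) drops any incomplete pooling window
x = MaxPool2D(pool_size=2, strides=2, padding='valid', name='pool')(x)
model = Model(inputs, x)
model.summary()  # pooled output shape: (None, 14, 14, 8)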

Project: keras-mtcnn    Author: xiangrufan    | project source | file source
def create_Kao_Onet(weight_path='model48.h5'):
    input = Input(shape = [48,48,3])
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1,2],name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1,2],name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1,2],name='prelu3')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(x)
    x = PReLU(shared_axes=[1,2],name='prelu4')(x)
    x = Permute((3,2,1))(x)
    x = Flatten()(x)
    x = Dense(256, name='conv5') (x)
    x = PReLU(name='prelu5')(x)

    classifier = Dense(2, activation='softmax',name='conv6-1')(x)
    bbox_regress = Dense(4,name='conv6-2')(x)
    landmark_regress = Dense(10,name='conv6-3')(x)
    model = Model([input], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)

    return model
Project: keras-mtcnn    Author: xiangrufan    | project source | file source
def create_Kao_Rnet(weight_path='model24.h5'):
    input = Input(shape=[24, 24, 3])  # change this shape to [None, None, 3] to allow arbitrary input sizes
    x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3,strides=2, padding='same')(x)

    x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)

    x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(128, name='conv4')(x)
    x = PReLU( name='prelu4')(x)
    classifier = Dense(2, activation='softmax', name='conv5-1')(x)
    bbox_regress = Dense(4, name='conv5-2')(x)
    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
Project: keras-surgeon    Author: BenWhetton    | project source | file source
def test_find_activation_layer():
    conv1_filters = 1
    conv2_filters = 1
    dense_units = 1
    model = Sequential()
    model.add(Conv2D(conv1_filters, [3, 3], input_shape=(28, 28, 1), data_format="channels_last", name='conv_1'))
    model.add(Activation('relu', name='act_1'))
    model.add(MaxPool2D((2, 2), name='pool_1'))
    model.add(Conv2D(conv2_filters, [3, 3], data_format="channels_last", name='conv_2'))
    model.add(Activation('relu', name='act_2'))
    model.add(MaxPool2D((2, 2), name='pool_2'))
    model.add(Flatten(name='flat_1'))
    model.add(Dense(dense_units, name='dense_1'))
    model.add(Activation('relu', name='act_3'))
    model.add(Dense(10, name='dense_2'))
    model.add(Activation('softmax', name='act_4'))
    assert find_activation_layer(model.get_layer('conv_1'), 0) == (model.get_layer('act_1'), 0)
    assert find_activation_layer(model.get_layer('conv_2'),
                                 0) == (model.get_layer('act_2'), 0)
    assert find_activation_layer(model.get_layer('dense_1'),
                                 0) == (model.get_layer('act_3'), 0)
    assert find_activation_layer(model.get_layer('dense_2'),
                                 0) == (model.get_layer('act_4'), 0)
Project: keras-mtcnn    Author: xiangrufan    | project source | file source
def create_Kao_Pnet(weight_path='model12old.h5'):
    input = Input(shape=[None, None, 3])
    x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1,2],name='PReLU1')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1,2],name='PReLU2')(x)
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1,2],name='PReLU3')(x)
    classifier = Conv2D(2, (1, 1), activation='softmax', name='conv4-1')(x)
    bbox_regress = Conv2D(4, (1, 1), name='conv4-2')(x)
    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
Project: keras-surgeon    Author: BenWhetton    | project source | file source
def test_delete_channels_maxpooling2d(channel_index, data_format):
    layer = MaxPool2D([2, 2], data_format=data_format)
    layer_test_helper_flatten_2d(layer, channel_index, data_format)
Project: keras_detect_tool_wear    Author: kidozh    | project source | file source
def build_multi_cnn_model(batch_size,
                          time_step,
                          input_dim,
                          output_dim,
                          kernel_size = 2,
                          conv_dim=(64,32,16),
                          dropout=0.2,
                          stack_loop_num=15):
    model = Sequential()

    first_dim,second_dim,loop_dim = conv_dim

    # build the first convolutional block
    # see https://stanfordmlgroup.github.io/projects/ecg/

    # uses 'valid' padding (the Conv2D default)
    model.add(Conv2D(first_dim,kernel_size,input_shape=(batch_size,time_step,input_dim)))
    # batch normalization reduces overfitting and speeds up training
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    # second convolutional block
    model.add(Conv2D(second_dim,kernel_size))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Conv2D(second_dim,kernel_size))
    model.add(MaxPool2D(kernel_size))

    # repeat the following block stack_loop_num times (15 by default)
    for _ in range(stack_loop_num):
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Conv2D(loop_dim,kernel_size))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(dropout))
        model.add(Conv2D(loop_dim,kernel_size))
        model.add(MaxPool2D(kernel_size))

    # result
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dense(output_dim))

    # this is a regression problem, not classification, so softmax is not needed
    # model.add(Activation('softmax'))
    model.compile(loss='mse',metrics=['mse'],optimizer='adam')

    return model
Project: yolov2    Author: datlife    | project source | file source
def darknet19(inputs, num_classes=1000, include_top=False):
    """
    DarkNet-19 architecture definition.

    Args:
      inputs: input image tensor
      num_classes: number of classes for the classification head (default 1000)
      include_top: whether to append the classification head

    Returns:
      x: output feature tensor (a Keras Model when include_top is True)
      pass_through_layers: a list of fine-grained feature layers (used for detection)
    """
    pass_through_layers = []

    inputs = Preprocessor(yolov2_preprocess_func, name='preprocessor')(inputs)

    x = conv_block(inputs, 32, (3, 3))
    x = MaxPool2D(strides=2)(x)

    x = conv_block(x, 64, (3, 3))
    x = MaxPool2D(strides=2)(x)

    x = conv_block(x, 128, (3, 3))
    x = conv_block(x, 64, (1, 1))
    x = conv_block(x, 128, (3, 3))
    x = MaxPool2D(strides=2)(x)

    x = conv_block(x, 256, (3, 3))
    x = conv_block(x, 128, (1, 1))
    x = conv_block(x, 256, (3, 3))
    x = MaxPool2D(strides=2)(x)

    x = conv_block(x, 512, (3, 3))
    x = conv_block(x, 256, (1, 1))
    x = conv_block(x, 512, (3, 3))
    x = conv_block(x, 256, (1, 1))
    x = conv_block(x, 512, (3, 3))
    pass_through_layers.append(x)
    x = MaxPool2D(strides=2)(x)

    x = conv_block(x, 1024, (3, 3))
    x = conv_block(x, 512, (1, 1))
    x = conv_block(x, 1024, (3, 3))
    x = conv_block(x, 512, (1, 1))
    x = conv_block(x, 1024, (3, 3))    # ---> feature extraction ends here

    if include_top:
        x = Conv2D(num_classes, (1, 1), activation='linear', padding='same')(x)
        x = GlobalAvgPool2D()(x)
        x = Activation(activation='softmax')(x)
        x = Model(inputs, x)

    return x, pass_through_layers