Python keras.layers module: AveragePooling2D() code examples

The following 48 code examples, extracted from open-source Python projects, show how to use keras.layers.AveragePooling2D().
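Before the project snippets, a minimal sketch of the layer itself may help (an assumption on my part: Keras 2 API with channels_last input; the snippets below mix Keras 1 signatures such as border_mode with Keras 2 ones such as padding): AveragePooling2D slides a pool_size window over the spatial dimensions and outputs the mean of each window, with strides defaulting to pool_size.

    # Minimal sketch, assuming Keras 2 and a channels_last input of shape (32, 32, 3)
    from keras.models import Sequential
    from keras.layers import AveragePooling2D

    model = Sequential()
    # A 2x2 window with the default stride (equal to pool_size) halves each spatial dimension
    model.add(AveragePooling2D(pool_size=(2, 2), padding='valid', input_shape=(32, 32, 3)))
    model.summary()  # final output shape: (None, 16, 16, 3)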

Project: Learning-to-navigate-without-a-map    Author: ToniRV
def create_actor_network(self, state_size, action_dim):
        """Create actor network."""
        print ("[MESSAGE] Build actor network.""")
        S = Input(shape=state_size)
        h_0 = Conv2D(32, (3, 3), padding="same",
                     kernel_regularizer=l2(0.0001),
                     activation="relu")(S)
        h_1 = Conv2D(32, (3, 3), padding="same",
                     kernel_regularizer=l2(0.0001),
                     activation="relu")(h_0)
        h_1 = AveragePooling2D(2, 2)(h_1)
        h_1 = Flatten()(h_1)
        h_1 = Dense(600, activation="relu")(h_1)
        A = Dense(action_dim, activation="softmax")(h_1)

        model = Model(inputs=S, outputs=A)

        return model, model.trainable_weights, S
Project: DenseNetKeras    Author: SulemanKazi
def addTransition(previousLayer, nChannels, nOutChannels, dropRate, blockNum):

    bn = BatchNormalization(name = 'tr_BatchNorm_{}'.format(blockNum), axis = 1)(previousLayer)

    relu = Activation('relu', name ='tr_relu_{}'.format(blockNum))(bn)

    conv = Convolution2D(nOutChannels, 1, 1, border_mode='same', name='tr_conv_{}'.format(blockNum))(relu)

    if dropRate is not None:

        dp = Dropout(dropRate, name='tr_dropout_{}'.format(blockNum))(conv)

        avgPool = AveragePooling2D(pool_size=(2, 2))(dp)

    else:
        avgPool = AveragePooling2D(pool_size=(2, 2))(conv)

    return avgPool
Project: hintbot    Author: madebyollin
def createModel(w=None,h=None):
    # Input placeholder
    original = Input(shape=(w, h, 4), name='icon_goes_here')

    # Model layer stack
    x = original
    x = Convolution2D(64, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = Convolution2D(64, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = Convolution2D(64, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = Convolution2D(64, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = AveragePooling2D((2, 2), border_mode='valid')(x)
    x = Convolution2D(16, 4, 4, activation='relu', border_mode='same', b_regularizer=l2(0.1))(x)
    x = Convolution2D(4, 4, 4, activation='relu', border_mode='same',  b_regularizer=l2(0.1))(x)
    downscaled = x

    # Compile model
    hintbot = Model(input=original, output=downscaled)
    hintbot.compile(optimizer='adam', loss='mean_squared_error')
    # Train
    if (os.path.isfile(load_weights_filepath)):
        hintbot.load_weights(load_weights_filepath)
    return hintbot
Project: Gene-prediction    Author: sriram2093
def classifier_layers(x, input_shape, stage_num, trainable=False):

    # compile times on theano tend to be very high, so we use smaller ROI pooling regions to work around this
    # (hence a smaller stride in the region that follows the ROI pool)
    if K.backend() == 'tensorflow':
        x = conv_block_td(x, 3, [512, 512, 1024], stage=stage_num, block='a', input_shape=input_shape, strides=(1, 2), trainable=trainable)
    elif K.backend() == 'theano':
        x = conv_block_td(x, 3, [512, 512, 1024], stage=stage_num, block='a', input_shape=input_shape, strides=(1, 1), trainable=trainable)

    print 'INFO: Classifier layers x block a: ', x
    x = identity_block_td(x, 3, [512, 512, 1024], stage=stage_num, block='c', trainable=trainable)
    print 'INFO: Classifier layers x block b: ', x
    x = identity_block_td(x, 3, [512, 512, 1024], stage=stage_num, block='d', trainable=trainable)
    print 'INFO: Classifier layers x block c: ', x

    #x = TimeDistributed(AveragePooling2D((2, 1)), name='avg_pool')(x)

    return x
Project: cnn_finetune    Author: flyyufelix
def block_inception_a(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3,3), strides=(1,1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x
Project: cnn_finetune    Author: flyyufelix
def block_inception_b(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 1, 1)

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)

    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)

    branch_3 = AveragePooling2D((3,3), strides=(1,1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x
Project: SerpentAI    Author: SerpentAI
def _initialize_model(self):
        input_layer = Input(shape=self.input_shape)

        tower_1 = Convolution2D(16, 1, 1, border_mode="same", activation="elu")(input_layer)
        tower_1 = Convolution2D(16, 3, 3, border_mode="same", activation="elu")(tower_1)

        tower_2 = Convolution2D(16, 1, 1, border_mode="same", activation="elu")(input_layer)
        tower_2 = Convolution2D(16, 3, 3, border_mode="same", activation="elu")(tower_2)
        tower_2 = Convolution2D(16, 3, 3, border_mode="same", activation="elu")(tower_2)

        tower_3 = MaxPooling2D((3, 3), strides=(1, 1), border_mode="same")(input_layer)
        tower_3 = Convolution2D(16, 1, 1, border_mode="same", activation="elu")(tower_3)

        merged_layer = merge([tower_1, tower_2, tower_3], mode="concat", concat_axis=1)

        output = AveragePooling2D((7, 7), strides=(8, 8))(merged_layer)
        output = Flatten()(output)
        output = Dense(self.action_count)(output)

        model = Model(input=input_layer, output=output)
        model.compile(rmsprop(lr=self.model_learning_rate, clipvalue=1), "mse")

        return model
Project: deepanalytics_compe26_benchmark    Author: takagiwa-ss
def resnet(repetition=2, k=1):
    '''Wide Residual Network (with a slight modification)
    depth == repetition*6 + 2
    '''
    from keras.models import Model
    from keras.layers import Input, Dense, Flatten, AveragePooling2D
    from keras.regularizers import l2

    input_shape = (1, _img_len, _img_len)
    output_dim = len(_columns)

    x = Input(shape=input_shape)

    z = conv2d(nb_filter=8, k_size=5, downsample=True)(x)        # out_shape ==    8, _img_len/ 2, _img_len/ 2
    z = bn_lrelu(0.01)(z)
    z = residual_block(nb_filter=k*16, repetition=repetition)(z) # out_shape == k*16, _img_len/ 4, _img_len/ 4
    z = residual_block(nb_filter=k*32, repetition=repetition)(z) # out_shape == k*32, _img_len/ 8, _img_len/ 8
    z = residual_block(nb_filter=k*64, repetition=repetition)(z) # out_shape == k*64, _img_len/16, _img_len/16
    z = AveragePooling2D((_img_len/16, _img_len/16))(z)
    z = Flatten()(z)
    z = Dense(output_dim=output_dim, activation='sigmoid', W_regularizer=l2(_Wreg_l2), init='zero')(z)

    return Model(input=x, output=z)
Project: deepascii    Author: awentzonline
def make_model(
        img_shape, charset_features, layer_name='block2_conv1',
        output_pool=2, pool_type='max'):
    if K.image_dim_ordering() == 'tf':
        num_chars, char_h, char_w, char_channels = charset_features.shape
        axis = -1
    else:
        num_chars, char_channels, char_h, char_w = charset_features.shape
        axis = 1
    vgg = vgg16.VGG16(input_shape=img_shape, include_top=False)
    layer = vgg.get_layer(layer_name)
    x = layer.output
    # TODO: theano dim order
    features_W = charset_features.transpose((1, 2, 3, 0)).astype(np.float32)
    features_W = features_W[::-1, ::-1, :, :] / np.sqrt(np.sum(np.square(features_W), axis=(0, 1), keepdims=True))
    x = BatchNormalization(mode=2)(x)
    x = Convolution2D(
        num_chars, char_h, char_w, border_mode='valid',
        weights=[features_W, np.zeros(num_chars)])(x)
    if output_pool > 1:
        pool_class = dict(max=MaxPooling2D, avg=AveragePooling2D)[pool_type]
        x = pool_class((output_pool, output_pool))(x)
    #x = Argmax(axis)(x)
    model = Model([vgg.input], [x])
    return model
Project: PSPNet-Keras-tensorflow    Author: Vladkryvoruchko
def interp_block(prev_layer, level, feature_map_shape, input_shape):
    if input_shape == (473, 473):
        kernel_strides_map = {1: 60,
                              2: 30,
                              3: 20,
                              6: 10}
    elif input_shape == (713, 713):
        kernel_strides_map = {1: 90,
                              2: 45,
                              3: 30,
                              6: 15}
    else:
        print("Pooling parameters for input shape ", input_shape, " are not defined.")
        exit(1)

    names = [
        "conv5_3_pool" + str(level) + "_conv",
        "conv5_3_pool" + str(level) + "_conv_bn"
        ]
    kernel = (kernel_strides_map[level], kernel_strides_map[level])
    strides = (kernel_strides_map[level], kernel_strides_map[level])
    prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
    prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0],
                        use_bias=False)(prev_layer)
    prev_layer = BN(name=names[1])(prev_layer)
    prev_layer = Activation('relu')(prev_layer)
    prev_layer = Lambda(Interp, arguments={'shape': feature_map_shape})(prev_layer)
    return prev_layer
Project: Learning-to-navigate-without-a-map    Author: ToniRV
def create_critic_network(self, state_size, action_dim):
        """create critic network."""
        print ("[MESSAGE] Build critic network.""")
        S = Input(shape=state_size)
        A = Input(shape=(action_dim,))

        # input
        h_0 = Conv2D(32, (3, 3), padding="same",
                     kernel_regularizer=l2(0.0001),
                     activation="relu")(S)
        h_1 = Conv2D(32, (3, 3), padding="same",
                     kernel_regularizer=l2(0.0001),
                     activation="relu")(h_0)
        h_1 = AveragePooling2D(2, 2)(h_1)
        h_1 = Flatten()(h_1)
        h_1 = Dense(600, activation="relu")(h_1)

        # action
        a_1 = Dense(600, activation="linear")(A)
        h_2 = add([h_1, a_1])
        h_3 = Dense(600, activation="relu")(h_2)
        V = Dense(action_dim, activation="softmax")(h_3)

        model = Model(inputs=[S, A], outputs=V)
        model.compile(loss='categorical_crossentropy',
                      optimizer="adam")
        return model, A, S
Project: keras-contrib    Author: farizrahman4u
def _add_auxiliary_head(x, classes, weight_decay):
    '''Adds an auxiliary head for training the model

    From section A.7 "Training of ImageNet models" of the paper, all NASNet models are
    trained using an auxiliary classifier around 2/3 of the depth of the network, with
    a loss weight of 0.4

    # Arguments
        x: input tensor
        classes: number of output classes
        weight_decay: l2 regularization weight

    # Returns
        a keras Tensor
    '''
    img_height = 1 if K.image_data_format() == 'channels_last' else 2
    img_width = 2 if K.image_data_format() == 'channels_last' else 3
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('auxiliary_branch'):
        auxiliary_x = Activation('relu')(x)
        auxiliary_x = AveragePooling2D((5, 5), strides=(3, 3), padding='valid', name='aux_pool')(auxiliary_x)
        auxiliary_x = Conv2D(128, (1, 1), padding='same', use_bias=False, name='aux_conv_projection',
                             kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(auxiliary_x)
        auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                         name='aux_bn_projection')(auxiliary_x)
        auxiliary_x = Activation('relu')(auxiliary_x)

        auxiliary_x = Conv2D(768, (auxiliary_x._keras_shape[img_height], auxiliary_x._keras_shape[img_width]),
                             padding='valid', use_bias=False, kernel_initializer='he_normal',
                             kernel_regularizer=l2(weight_decay), name='aux_conv_reduction')(auxiliary_x)
        auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                         name='aux_bn_reduction')(auxiliary_x)
        auxiliary_x = Activation('relu')(auxiliary_x)

        auxiliary_x = GlobalAveragePooling2D()(auxiliary_x)
        auxiliary_x = Dense(classes, activation='softmax', kernel_regularizer=l2(weight_decay),
                            name='aux_predictions')(auxiliary_x)
    return auxiliary_x
Project: keras-squeezenet    Author: dvbuntu
def get_squeezenet(nb_classes):

    input_img = Input(shape=(3, 227, 227))
    x = Convolution2D(96, 7, 7, subsample=(2, 2), border_mode='valid')(input_img)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)

    x = fire_module(x, 16, 64)
    x = fire_module(x, 16, 64)
    x = fire_module(x, 32, 128)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)

    x = fire_module(x, 32, 192)
    x = fire_module(x, 48, 192)
    x = fire_module(x, 48, 192)
    x = fire_module(x, 64, 256)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)

    x = fire_module(x, 64, 256)
    x = Dropout(0.5)(x)

    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Convolution2D(nb_classes, 1, 1, border_mode='valid')(x)

    # global pooling not available
    x = AveragePooling2D(pool_size=(15, 15))(x)
    x = Flatten()(x)
    out = Dense(nb_classes, activation='softmax')(x)
    model = Model(input=input_img, output=[out])
    return model
Project: keras-squeezenet    Author: dvbuntu
def get_small_squeezenet(nb_classes):

    input_img = Input(shape=(3, 32, 32))
    x = Convolution2D(16, 3, 3, border_mode='same')(input_img)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3))(x)

    x = fire_module(x, 32, 128)
    x = fire_module(x, 32, 128)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = fire_module(x, 48, 192)
    x = fire_module(x, 48, 192)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = fire_module(x, 64, 256)
    x = Dropout(0.5)(x)

    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Convolution2D(nb_classes, 1, 1, border_mode='valid')(x)

    # global pooling not available
    x = AveragePooling2D(pool_size=(4, 4))(x)
    x = Flatten()(x)
    out = Dense(nb_classes, activation='softmax')(x)
    model = Model(input=input_img, output=[out])
    return model
Project: kaggle-lung-cancer    Author: mdai
def define_model(image_shape):
    img_input = Input(shape=image_shape)

    x = Convolution2D(32, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)

    x = res_block(x, nb_filters=512, block=4, subsample_factor=2)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    model.summary()
    return model
Project: kaggle-lung-cancer    Author: mdai
def define_model(image_shape):
    img_input = Input(shape=image_shape)

    x = Convolution2D(32, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)

    x = res_block(x, nb_filters=512, block=4, subsample_factor=2)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    model.summary()
    return model
Project: kaggle-lung-cancer    Author: mdai
def define_model(image_shape, transfer_weights_filepath):
    img_input = Input(shape=image_shape)

    x = Convolution2D(32, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(x)
    x = Flatten()(x)
    x_orig = Dense(1, activation='sigmoid')(x)

    model_base = Model(img_input, x_orig)
    model_base.load_weights(transfer_weights_filepath)

    bbox = Dense(4, activation='linear', name='bbox')(x)
    model_bbox = Model(img_input, bbox)
    model_bbox.compile(optimizer='adam', loss='mae')
    model_bbox.summary()
    return model_bbox
Project: kaggle-lung-cancer    Author: mdai
def define_model(image_shape, transfer_weights_filepath):
    img_input = Input(shape=image_shape)

    x = Convolution2D(32, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(x)
    x = Flatten()(x)
    x_orig = Dense(1, activation='sigmoid')(x)

    model_base = Model(img_input, x_orig)
    model_base.load_weights(transfer_weights_filepath)

    bbox = Dense(4, activation='linear', name='bbox')(x)
    model_bbox = Model(img_input, bbox)
    model_bbox.compile(optimizer='adam', loss='mae')
    model_bbox.summary()
    return model_bbox
Project: kaggle-lung-cancer    Author: mdai
def define_model(image_shape):
    img_input = Input(shape=image_shape)

    x = Convolution2D(128, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=128, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=0, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=512, block=0, subsample_factor=2)
    x = res_block(x, nb_filters=512, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=512, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=1024, block=0, subsample_factor=2)
    x = res_block(x, nb_filters=1024, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=1024, block=0, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', 'precision', 'recall'])
    model.summary()
    return model
Project: kaggle-lung-cancer    Author: mdai
def define_model(image_shape):
    img_input = Input(shape=image_shape)

    x = Convolution2D(128, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=128, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=512, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=512, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=512, block=2, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(8, 8))(x)
    x = Flatten()(x)
    x = Dropout(0.2)(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    model.summary()
    return model
Project: kaggle-lung-cancer    Author: mdai
def define_model():
    img_input = Input(shape=(32, 32, 1))

    x = Convolution2D(32, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(x)
    x = Flatten()(x)

    bbox = Dense(4, activation='linear', name='bbox')(x)
    model_bbox = Model(img_input, bbox)
    model_bbox.compile(optimizer='adam', loss='mae')

    return model_bbox
Project: kaggle-lung-cancer    Author: mdai
def define_model():
    img_input = Input(shape=(32, 32, 2))

    x = Convolution2D(32, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(x)
    x = Flatten()(x)

    bbox = Dense(4, activation='linear', name='bbox')(x)
    model_bbox = Model(img_input, bbox)
    model_bbox.compile(optimizer='adam', loss='mae')

    return model_bbox
Project: kaggle-lung-cancer    Author: mdai
def define_model():
    img_input = Input(shape=(64, 64, 3))

    x = Convolution2D(32, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)

    x = res_block(x, nb_filters=512, block=4, subsample_factor=2)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy')

    return model
Project: kaggle-lung-cancer    Author: mdai
def define_model():
    img_input = Input(shape=(64, 64, 5))

    x = Convolution2D(32, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=3, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=3, subsample_factor=1)

    x = res_block(x, nb_filters=512, block=4, subsample_factor=2)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)
    x = res_block(x, nb_filters=512, block=4, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid')(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy')

    return model
Project: kaggle-lung-cancer    Author: mdai
def define_model():
    img_input = Input(shape=(32, 32, 1))

    x = Convolution2D(32, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=32, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=64, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=64, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=128, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=2, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(8, 8))(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy')

    return model
Project: kaggle-lung-cancer    Author: mdai
def define_model():
    img_input = Input(shape=(32, 32, 1))

    x = Convolution2D(128, 3, 3, subsample=(1, 1), border_mode='same')(img_input)

    x = res_block(x, nb_filters=128, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=0, subsample_factor=1)
    x = res_block(x, nb_filters=128, block=0, subsample_factor=1)

    x = res_block(x, nb_filters=256, block=1, subsample_factor=2)
    x = res_block(x, nb_filters=256, block=1, subsample_factor=1)
    x = res_block(x, nb_filters=256, block=1, subsample_factor=1)

    x = res_block(x, nb_filters=512, block=2, subsample_factor=2)
    x = res_block(x, nb_filters=512, block=2, subsample_factor=1)
    x = res_block(x, nb_filters=512, block=2, subsample_factor=1)

    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = AveragePooling2D(pool_size=(8, 8))(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy')

    return model
Project: keras-surgeon    Author: BenWhetton
def test_delete_channels_averagepooling2d(channel_index, data_format):
    layer = AveragePooling2D([2, 3], data_format=data_format)
    layer_test_helper_flatten_2d(layer, channel_index, data_format)
Project: coremltools    Author: apple
def test_average_pooling_no_overlap(self):
        # no_overlap: pool_size = strides
        model = Sequential()
        model.add(AveragePooling2D(input_shape=(16,16,3), pool_size=(2, 2),
                               strides=None, padding='valid'))
        self._test_keras_model(model, delta=1e-2)
Project: coremltools    Author: apple
def test_average_pooling_inception_config_1(self):
        # overlapping pooling: 3x3 window with stride 1 and 'same' padding (Inception-style)
        model = Sequential()
        model.add(AveragePooling2D(input_shape=(16,16,3), pool_size=(3,3),
                               strides=(1,1), padding='same'))
        self._test_keras_model(model, delta=1e-2)
Project: coremltools    Author: apple
def test_tiny_mcrnn_td(self):

        model = Sequential()
        model.add(Conv2D(3,(1,1), input_shape=(2,4,4), padding='same'))
        model.add(AveragePooling2D(pool_size=(2,2)))
        model.add(Reshape((2,3)))
        model.add(TimeDistributed(Dense(5)))

        self._test_keras_model(model)
Project: coremltools    Author: apple
def test_tiny_mcrnn_recurrent(self):

        model = Sequential()
        model.add(Conv2D(3,(1,1), input_shape=(2,4,4), padding='same'))
        model.add(AveragePooling2D(pool_size=(2,2)))
        model.add(Reshape((2,3)))
        model.add(LSTM(5, recurrent_activation = 'sigmoid'))

        self._test_keras_model(model)
Project: cnn_finetune    Author: flyyufelix
def block_inception_c(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 256, 1, 1)

    branch_1 = conv2d_bn(input, 384, 1, 1)
    branch_10 = conv2d_bn(branch_1, 256, 1, 3)
    branch_11 = conv2d_bn(branch_1, 256, 3, 1)
    branch_1 = merge([branch_10, branch_11], mode='concat', concat_axis=channel_axis)


    branch_2 = conv2d_bn(input, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_20 = conv2d_bn(branch_2, 256, 1, 3)
    branch_21 = conv2d_bn(branch_2, 256, 3, 1)
    branch_2 = merge([branch_20, branch_21], mode='concat', concat_axis=channel_axis)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x
Project: cori-tf-distributed-examples    Author: NERSC
def make_model(x_shape, batch_size=128, num_classes=10):
    y = tf.placeholder(dtype=tf.int32,shape=(batch_size,))# Input(dtype=tf.int32, shape=y_shape)
    K.set_learning_phase(1)
    img_input = Input(batch_shape= tuple( [batch_size] + list(x_shape)))
    bn_axis = 3


    x = Conv2D(64, (3, 3), strides=(1, 1), padding='same',name='conv1')(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)


    x = conv_block(x, 3, [16,16,16], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [16,16,16], stage=2, block='b')
    x = identity_block(x, 3, [16,16,16], stage=2, block='c')
    x = identity_block(x, 3, [16,16,16], stage=2, block='d')


    x = conv_block(x, 3, [32,32,32], stage=3, block='a', strides=(2,2))
    x = identity_block(x, 3, [32,32,32], stage=3, block='b')
    x = identity_block(x, 3, [32,32,32], stage=3, block='c')
    x = identity_block(x, 3, [32,32,32], stage=3, block='d')



    x = conv_block(x, 3, [64,64,64], stage=4, block='a', strides=(2,2))
    x = identity_block(x, 3, [64,64,64], stage=4, block='b')
    x = identity_block(x, 3, [64,64,64], stage=4, block='c')
    x = identity_block(x, 3, [64,64,64], stage=4, block='d')


    x = AveragePooling2D((8, 8), name='avg_pool')(x)


    x = Flatten()(x)
    x = Dense(num_classes, name='fc1000')(x)  # outputs logits; softmax is applied inside the loss below

    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=x,labels=y))

    return img_input, y, loss
Project: keras-contrib    Author: farizrahman4u
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4, block_prefix=None):
    '''
    Adds a pointwise convolution layer (with batch normalization and relu),
    and an average pooling layer. The number of output convolution filters
    can be reduced by appropriately reducing the compression parameter.

    # Arguments
        ip: input keras tensor
        nb_filter: integer, the dimensionality of the output space
            (i.e. the number output of filters in the convolution)
        compression: calculated as 1 - reduction. Reduces the number
            of feature maps in the transition block.
        weight_decay: weight decay factor
        block_prefix: str, for block unique naming

    # Input shape
        4D tensor with shape:
        `(samples, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
        4D tensor with shape:
        `(samples, nb_filter * compression, rows / 2, cols / 2)`
        if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, rows / 2, cols / 2, nb_filter * compression)`
        if data_format='channels_last'.

    # Returns
        a keras tensor
    '''
    with K.name_scope('Transition'):
        concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5, name=name_or_none(block_prefix, '_bn'))(ip)
        x = Activation('relu')(x)
        x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same',
                   use_bias=False, kernel_regularizer=l2(weight_decay), name=name_or_none(block_prefix, '_conv2D'))(x)
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)

        return x
Project: keras-contrib    Author: farizrahman4u
def _adjust_block(p, ip, filters, weight_decay=5e-5, id=None):
    '''
    Adjusts the input `p` to match the shape of the input `ip`,
    or changes its number of output filters when needed

    # Arguments:
        p: input tensor which needs to be modified
        ip: input tensor whose shape needs to be matched
        filters: number of output filters to be matched
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        an adjusted Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    img_dim = 2 if K.image_data_format() == 'channels_first' else -2

    with K.name_scope('adjust_block'):
        if p is None:
            p = ip

        elif p._keras_shape[img_dim] != ip._keras_shape[img_dim]:
            with K.name_scope('adjust_reduction_block_%s' % id):
                p = Activation('relu', name='adjust_relu_1_%s' % id)(p)

                p1 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_1_%s' % id)(p)
                p1 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_1_%s' % id, kernel_initializer='he_normal')(p1)

                p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_2_%s' % id)(p2)
                p2 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_2_%s' % id, kernel_initializer='he_normal')(p2)

                p = concatenate([p1, p2], axis=channel_dim)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)

        elif p._keras_shape[channel_dim] != filters:
            with K.name_scope('adjust_projection_block_%s' % id):
                p = Activation('relu')(p)
                p = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='adjust_conv_projection_%s' % id,
                           use_bias=False, kernel_regularizer=l2(weight_decay), kernel_initializer='he_normal')(p)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)
    return p
Project: keras-contrib    Author: farizrahman4u
def _normal_A(ip, p, filters, weight_decay=5e-5, id=None):
    '''Adds a Normal cell for NASNet-A (Fig. 4 in the paper)

    # Arguments:
        ip: input tensor `x`
        p: input tensor `p`
        filters: number of output filters
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        a Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('normal_A_block_%s' % id):
        p = _adjust_block(p, ip, filters, weight_decay, id)

        h = Activation('relu')(ip)
        h = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='normal_conv_1_%s' % id,
                   use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(h)
        h = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name='normal_bn_1_%s' % id)(h)

        with K.name_scope('block_1'):
            x1_1 = _separable_conv_block(h, filters, kernel_size=(5, 5), weight_decay=weight_decay,
                                         id='normal_left1_%s' % id)
            x1_2 = _separable_conv_block(p, filters, weight_decay=weight_decay, id='normal_right1_%s' % id)
            x1 = add([x1_1, x1_2], name='normal_add_1_%s' % id)

        with K.name_scope('block_2'):
            x2_1 = _separable_conv_block(p, filters, (5, 5), weight_decay=weight_decay, id='normal_left2_%s' % id)
            x2_2 = _separable_conv_block(p, filters, (3, 3), weight_decay=weight_decay, id='normal_right2_%s' % id)
            x2 = add([x2_1, x2_2], name='normal_add_2_%s' % id)

        with K.name_scope('block_3'):
            x3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='normal_left3_%s' % (id))(h)
            x3 = add([x3, p], name='normal_add_3_%s' % id)

        with K.name_scope('block_4'):
            x4_1 = AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='normal_left4_%s' % (id))(p)
            x4_2 = AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='normal_right4_%s' % (id))(p)
            x4 = add([x4_1, x4_2], name='normal_add_4_%s' % id)

        with K.name_scope('block_5'):
            x5 = _separable_conv_block(h, filters, weight_decay=weight_decay, id='normal_left5_%s' % id)
            x5 = add([x5, h], name='normal_add_5_%s' % id)

        x = concatenate([p, x1, x2, x3, x4, x5], axis=channel_dim, name='normal_concat_%s' % id)
    return x, ip
Project: keras-contrib    Author: farizrahman4u
def _reduction_A(ip, p, filters, weight_decay=5e-5, id=None):
    '''Adds a Reduction cell for NASNet-A (Fig. 4 in the paper)

    # Arguments:
        ip: input tensor `x`
        p: input tensor `p`
        filters: number of output filters
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        a Keras tensor
    '''
    """"""
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('reduction_A_block_%s' % id):
        p = _adjust_block(p, ip, filters, weight_decay, id)

        h = Activation('relu')(ip)
        h = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='reduction_conv_1_%s' % id,
                   use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(h)
        h = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name='reduction_bn_1_%s' % id)(h)

        with K.name_scope('block_1'):
            x1_1 = _separable_conv_block(h, filters, (5, 5), strides=(2, 2), weight_decay=weight_decay,
                                         id='reduction_left1_%s' % id)
            x1_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), weight_decay=weight_decay,
                                         id='reduction_1_%s' % id)
            x1 = add([x1_1, x1_2], name='reduction_add_1_%s' % id)

        with K.name_scope('block_2'):
            x2_1 = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='reduction_left2_%s' % id)(h)
            x2_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), weight_decay=weight_decay,
                                         id='reduction_right2_%s' % id)
            x2 = add([x2_1, x2_2], name='reduction_add_2_%s' % id)

        with K.name_scope('block_3'):
            x3_1 = AveragePooling2D((3, 3), strides=(2, 2), padding='same', name='reduction_left3_%s' % id)(h)
            x3_2 = _separable_conv_block(p, filters, (5, 5), strides=(2, 2), weight_decay=weight_decay,
                                         id='reduction_right3_%s' % id)
            x3 = add([x3_1, x3_2], name='reduction_add3_%s' % id)

        with K.name_scope('block_4'):
            x4 = AveragePooling2D((3, 3), strides=(1, 1), padding='same', name='reduction_left4_%s' % id)(x1)
            x4 = add([x2, x4])

        with K.name_scope('block_5'):
            x5_1 = _separable_conv_block(x1, filters, (3, 3), weight_decay=weight_decay, id='reduction_left4_%s' % id)
            x5_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='reduction_right5_%s' % id)(h)
            x5 = add([x5_1, x5_2], name='reduction_add4_%s' % id)

        x = concatenate([x2, x3, x4, x5], axis=channel_dim, name='reduction_concat_%s' % id)
        return x, ip
Project: carnd-behavioral-cloning    Author: nikidimi
def keras_nn(X_train, y_train):
    """
    Constructs a neural network using keras and trains it.
    The best final network is saved in model.json and model.h5

    Parameters
    ----------
    X_train : numpy array
        The images
    y_train : numpy array
        The angles

    """
    model = Sequential()

    # Further reduces the dimension of the image to 8x16
    model.add(AveragePooling2D((2, 2), border_mode='valid', input_shape=(16, 32, 1)))
    # Applies 2x2 convolution
    model.add(Convolution2D(1, 2, 2, subsample=(1, 1)))
    model.add(ELU())
    # Max Pooling to reduce the dimensions. 2X4 used because it matches the aspect ratio of the input
    model.add(MaxPooling2D((2, 4), border_mode='valid'))
    # Dropout - We only have 10 connections at this point, but it still improves performance. However, it should be kept low; e.g. 0.5 doesn't work
    model.add(Dropout(0.25))
    model.add(Flatten())
    # The final layer - outputs a float number (the steering angle)
    model.add(Dense(1))  #

    # Show a summary of the neural network
    model.summary()

    # Save the best model by validation mean squared error
    checkpoint = ModelCheckpoint("model.h5", monitor='val_mean_squared_error', verbose=1, save_best_only=True, mode='min')

    # Stop training when there is no improvement.
    # This is to speed up training; the accuracy is not affected, because the checkpoint will pick up the best model anyway
    early_stop = EarlyStopping(monitor='val_mean_squared_error', min_delta=0.0001, patience=4, verbose=1, mode='min')

    # Compile the model with Adam optimizer and monitor mean squared error
    model.compile('adam', 'mean_squared_error', ['mean_squared_error'])

    # Save the model to JSON
    model_json = model.to_json()
    with open("model.json", "w") as model_file:
        model_file.write(model_json)

    # Start training.
    # nb_epoch should be a big number, there is early stopping callback anyway
    # Data is split by keras to training and validation
    history = model.fit(X_train, y_train, batch_size=32, nb_epoch=150, verbose=1, callbacks=[checkpoint, early_stop], validation_split=0.2, shuffle=True)
Project: cifar-10-cnn    Author: BIGBALLON
def densenet(img_input,classes_num):

    def bn_relu(x):
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        return x

    def bottleneck(x):
        channels = growth_rate * 4
        x = bn_relu(x)
        x = Conv2D(channels,kernel_size=(1,1),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        x = bn_relu(x)
        x = Conv2D(growth_rate,kernel_size=(3,3),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        return x

    def single(x):
        x = bn_relu(x)
        x = Conv2D(growth_rate,kernel_size=(3,3),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        return x

    def transition(x, inchannels):
        outchannels = int(inchannels * compression)
        x = bn_relu(x)
        x = Conv2D(outchannels,kernel_size=(1,1),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        x = AveragePooling2D((2,2), strides=(2, 2))(x)
        return x, outchannels

    def dense_block(x,blocks,nchannels):
        concat = x
        for i in range(blocks):
            x = bottleneck(concat)
            concat = concatenate([x,concat], axis=-1)
            nchannels += growth_rate
        return concat, nchannels

    def dense_layer(x):
        return Dense(classes_num,activation='softmax',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay))(x)


    nblocks = (depth - 4) // 6 
    nchannels = growth_rate * 2

    x = Conv2D(nchannels,kernel_size=(3,3),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(img_input)

    x, nchannels = dense_block(x,nblocks,nchannels)
    x, nchannels = transition(x,nchannels)
    x, nchannels = dense_block(x,nblocks,nchannels)
    x, nchannels = transition(x,nchannels)
    x, nchannels = dense_block(x,nblocks,nchannels)
    x, nchannels = transition(x,nchannels)
    x = bn_relu(x)
    x = GlobalAveragePooling2D()(x)
    x = dense_layer(x)
    return x
Project: cifar-10-cnn    Author: BIGBALLON
def densenet(img_input,classes_num):

    def bn_relu(x):
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        return x

    def bottleneck(x):
        channels = growth_rate * 4
        x = bn_relu(x)
        x = Conv2D(channels,kernel_size=(1,1),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        x = bn_relu(x)
        x = Conv2D(growth_rate,kernel_size=(3,3),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        return x

    def single(x):
        x = bn_relu(x)
        x = Conv2D(growth_rate,kernel_size=(3,3),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        return x

    def transition(x, inchannels):
        outchannels = int(inchannels * compression)
        x = bn_relu(x)
        x = Conv2D(outchannels,kernel_size=(1,1),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(x)
        x = AveragePooling2D((2,2), strides=(2, 2))(x)
        return x, outchannels

    def dense_block(x,blocks,nchannels):
        concat = x
        for i in range(blocks):
            x = bottleneck(concat)
            concat = concatenate([x,concat], axis=-1)
            nchannels += growth_rate
        return concat, nchannels

    def dense_layer(x):
        return Dense(classes_num,activation='softmax',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay))(x)


    nblocks = (depth - 4) // 6 
    nchannels = growth_rate * 2

    x = Conv2D(nchannels,kernel_size=(3,3),strides=(1,1),padding='same',kernel_initializer=he_normal(),kernel_regularizer=regularizers.l2(weight_decay),use_bias=False)(img_input)

    x, nchannels = dense_block(x,nblocks,nchannels)
    x, nchannels = transition(x,nchannels)
    x, nchannels = dense_block(x,nblocks,nchannels)
    x, nchannels = transition(x,nchannels)
    x, nchannels = dense_block(x,nblocks,nchannels)
    x, nchannels = transition(x,nchannels)
    x = bn_relu(x)
    x = GlobalAveragePooling2D()(x)
    x = dense_layer(x)
    return x
Project: LSTM-GRU-CNN-MLP    Author: ansleliu
def build_model():
    model = Sequential()

    # Conv layer 1: 4 feature maps from 5x5 kernels on the single-channel 20x20 input
    model.add(Convolution2D(4, 5, 5, border_mode='valid', dim_ordering='th', input_shape=(1, 20, 20)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))

    # Add Gaussian noise, upsample by 2x, then apply a 16-filter 3x3 atrous (dilated) convolution
    model.add(GaussianNoise(0.001))
    model.add(UpSampling2D(size=(2, 2), dim_ordering='th'))

    model.add(AtrousConvolution2D(16, 3, 3, border_mode='valid', dim_ordering='th'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    # model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
    model.add(AveragePooling2D(pool_size=(2, 2), dim_ordering='th'))
    model.add(Activation('tanh'))

    # Another atrous convolution block: 8 filters of size 3x3
    model.add(AtrousConvolution2D(8, 3, 3, border_mode='valid', dim_ordering='th'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('linear'))

    # More noise, a 4-filter 3x3 atrous convolution, then dropout and average pooling
    model.add(GaussianNoise(0.002))
    model.add(AtrousConvolution2D(4, 3, 3, border_mode='valid', dim_ordering='th'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Dropout(0.2))
    # model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
    model.add(AveragePooling2D(pool_size=(2, 2), dim_ordering='th'))
    model.add(Activation('tanh'))

    # Flatten the feature maps and pass them through a small fully connected layer
    model.add(Flatten())
    model.add(Dense(8))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))

    model.add(Dense(1))
    model.add(Activation('linear'))

    start = time.time()

    # Alternative optimizer: SGD + momentum (commented out below)
    # model.compile sets the loss function and optimizer
    # sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="mse", optimizer="rmsprop") # mse kld # Nadam  rmsprop
    print "Compilation Time : ", time.time() - start
    return model
Project: kaggle-dstl-satellite-imagery-feature-detection    Author: alno
def rnet1(input_shapes, n_classes):
    def conv(size, x):
        x = Convolution2D(size, 3, 3, border_mode='same', init='he_normal', bias=False)(x)
        x = BatchNormalization(axis=1, mode=0)(x)
        x = PReLU(shared_axes=[2, 3])(x)
        return x

    def unet_block(sizes, inp):
        x = inp

        skips = []

        for sz in sizes[:-1]:
            x = conv(sz, x)
            skips.append(x)
            x = MaxPooling2D((2, 2))(x)

        x = conv(sizes[-1], x)

        for sz in reversed(sizes[:-1]):
            x = conv(sz, merge([UpSampling2D((2, 2))(x), skips.pop()], mode='concat', concat_axis=1))

        return x

    def fcn_block(sizes, inp):
        x = inp

        for sz in sizes:
            x = conv(sz, x)

        return Dropout(0.2)(x)

    # Build pyramid of inputs
    inp0 = Input(input_shapes['in'], name='in')
    inp1 = AveragePooling2D((2, 2))(inp0)
    inp2 = AveragePooling2D((2, 2))(inp1)

    # Build outputs in resnet fashion
    out2 = unet_block([32, 48], inp2)
    #out2 = merge([unet_block([32, 48, 32], merge([inp2, out2], mode='concat', concat_axis=1)), out2], mode='sum')

    out1 = UpSampling2D((2, 2))(out2)
    #out1 = merge([unet_block([32, 32, 48], merge([inp1, out1], mode='concat', concat_axis=1)), out1], mode='sum')
    out1 = merge([unet_block([32, 48, 64], merge([inp1, out1], mode='concat', concat_axis=1)), out1], mode='sum')

    out0 = UpSampling2D((2, 2))(out1)
    out0 = merge([unet_block([32, 48, 64], merge([inp0, out0], mode='concat', concat_axis=1)), out0], mode='sum')
    out0 = merge([unet_block([32, 48, 64], merge([inp0, out0], mode='concat', concat_axis=1)), out0], mode='sum')

    # Final convolution
    out = Convolution2D(n_classes, 1, 1, activation='sigmoid')(out0)

    return Model(input=inp0, output=out)
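A hedged construction example for rnet1, assuming Keras 1.x with channels-first tensors (the block uses BatchNormalization(axis=1) and concat_axis=1); the 8-channel 64x64 input shape is made up for illustration and only needs to be divisible by 4 because of the two AveragePooling2D pyramid levels:

# Illustrative shapes only, not taken from the original project.
model = rnet1({'in': (8, 64, 64)}, n_classes=10)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()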
项目:kaggle-dstl-satellite-imagery-feature-detection    作者:alno    | 项目源码 | 文件源码
def rnet1_mi(input_shapes, n_classes):
    def conv(size, x):
        x = Convolution2D(size, 3, 3, border_mode='same', init='he_normal', bias=False)(x)
        x = BatchNormalization(axis=1, mode=0)(x)
        x = PReLU(shared_axes=[2, 3])(x)
        return x

    def unet_block(sizes, inp):
        x = inp

        skips = []

        for sz in sizes[:-1]:
            x = conv(sz, x)
            skips.append(x)
            x = MaxPooling2D((2, 2))(x)

        x = conv(sizes[-1], x)

        for sz in reversed(sizes[:-1]):
            x = conv(sz, merge([UpSampling2D((2, 2))(x), skips.pop()], mode='concat', concat_axis=1))

        return x

    def radd(out, inp, block):
        block_in = merge([inp, out], mode='concat', concat_axis=1)
        block_out = block(block_in)

        return merge([block_out, out], mode='sum')

    in_I = Input(input_shapes['in_I'], name='in_I')
    in_M = Input(input_shapes['in_M'], name='in_M')

    # Build pyramid of inputs
    inp0 = in_I
    inp1 = AveragePooling2D((2, 2))(inp0)
    inp2 = merge([AveragePooling2D((2, 2))(inp1), in_M], mode='concat', concat_axis=1)
    inp3 = AveragePooling2D((2, 2))(inp2)

    # Build outputs in resnet fashion
    out3 = unet_block([32, 48], inp3)

    out2 = UpSampling2D((2, 2))(out3)
    out2 = radd(out2, inp2, lambda x: unet_block([32, 48], x))

    out1 = UpSampling2D((2, 2))(out2)
    out1 = radd(out1, inp1, lambda x: unet_block([32, 48], x))
    out1 = radd(out1, inp1, lambda x: unet_block([32, 48, 64], x))

    out0 = UpSampling2D((2, 2))(out1)
    out0 = radd(out0, inp0, lambda x: unet_block([32, 48], x))
    out0 = radd(out0, inp0, lambda x: unet_block([32, 48, 64], x))

    # Final convolution
    out = Convolution2D(n_classes, 1, 1, activation='sigmoid')(out0)

    return Model(input=[in_I, in_M], output=out)
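A hedged example of calling rnet1_mi, again assuming Keras 1.x with channels-first tensors. Because in_M is concatenated with the 4x-downsampled pyramid level (inp2), its spatial size must be one quarter of in_I's; the channel counts below are made up for illustration:

# Illustrative shapes only: in_I is 8 x 64 x 64, in_M is 2 x 16 x 16.
model = rnet1_mi({'in_I': (8, 64, 64), 'in_M': (2, 16, 16)}, n_classes=10)
model.compile(optimizer='adam', loss='binary_crossentropy')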
项目:googLeNet    作者:dingchenwei    | 项目源码 | 文件源码
def define_model(weight_path = None):
    input = Input(shape=(224, 224, 3))

    conv1_7x7_s2 = Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2), padding='same', activation='relu', kernel_regularizer=l2(0.01))(input)

    maxpool1_3x3_s2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(conv1_7x7_s2)

    conv2_3x3_reduce = Conv2D(filters=64, kernel_size=(1, 1), padding='same', activation='relu', kernel_regularizer=l2(0.01))(maxpool1_3x3_s2)

    conv2_3x3 = Conv2D(filters=192, kernel_size=(3, 3), padding='same', activation='relu', kernel_regularizer=l2(0.01))(conv2_3x3_reduce)

    maxpool2_3x3_s2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(conv2_3x3)

    inception_3a = inception_model(input=maxpool2_3x3_s2, filters_1x1=64, filters_3x3_reduce=96, filters_3x3=128, filters_5x5_reduce=16, filters_5x5=32, filters_pool_proj=32)

    inception_3b = inception_model(input=inception_3a, filters_1x1=128, filters_3x3_reduce=128, filters_3x3=192, filters_5x5_reduce=32, filters_5x5=96, filters_pool_proj=64)

    maxpool3_3x3_s2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(inception_3b)

    inception_4a = inception_model(input=maxpool3_3x3_s2, filters_1x1=192, filters_3x3_reduce=96, filters_3x3=208, filters_5x5_reduce=16, filters_5x5=48, filters_pool_proj=64)

    inception_4b = inception_model(input=inception_4a, filters_1x1=160, filters_3x3_reduce=112, filters_3x3=224, filters_5x5_reduce=24, filters_5x5=64, filters_pool_proj=64)

    inception_4c = inception_model(input=inception_4b, filters_1x1=128, filters_3x3_reduce=128, filters_3x3=256, filters_5x5_reduce=24, filters_5x5=64, filters_pool_proj=64)

    inception_4d = inception_model(input=inception_4c, filters_1x1=112, filters_3x3_reduce=144, filters_3x3=288, filters_5x5_reduce=32, filters_5x5=64, filters_pool_proj=64)

    inception_4e = inception_model(input=inception_4d, filters_1x1=256, filters_3x3_reduce=160, filters_3x3=320, filters_5x5_reduce=32, filters_5x5=128, filters_pool_proj=128)

    maxpool4_3x3_s2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(inception_4e)

    inception_5a = inception_model(input=maxpool4_3x3_s2, filters_1x1=256, filters_3x3_reduce=160, filters_3x3=320, filters_5x5_reduce=32, filters_5x5=128, filters_pool_proj=128)

    inception_5b = inception_model(input=inception_5a, filters_1x1=384, filters_3x3_reduce=192, filters_3x3=384, filters_5x5_reduce=48, filters_5x5=128, filters_pool_proj=128)

    averagepool1_7x7_s1 = AveragePooling2D(pool_size=(7, 7), strides=(7, 7), padding='same')(inception_5b)

    drop1 = Dropout(rate=0.4)(averagepool1_7x7_s1)

    linear = Dense(units=1000, activation='softmax', kernel_regularizer=l2(0.01))(keras.layers.core.Flatten()(drop1))
    last = linear


    model = Model(inputs=input, outputs=last)
    model.summary()
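The extract ends at model.summary(), so the weight loading and return are not shown. A hedged usage sketch, assuming the full define_model() goes on to return the model and that the inception_model() helper it relies on is defined elsewhere in the same file:

# Assumes define_model() is completed to return `model`.
model = define_model()
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])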
项目:ecogdeep    作者:nancywang1991    | 项目源码 | 文件源码
def ecog_1d_model(channels=None, weights=None):

    input_tensor = Input(shape=(1, channels, 1000))
    # Block 1
    x = AveragePooling2D((1, 5), name='pre_pool')(input_tensor)
    x = Convolution2D(4, 1, 3, border_mode='same', name='block1_conv1')(x)
    # x = BatchNormalization(axis=1)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((1, 3), name='block1_pool')(x)

    # Block 2
    x = Convolution2D(8, 1, 3, border_mode='same', name='block2_conv1')(x)
    # x = BatchNormalization(axis=1)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((1, 3), name='block2_pool')(x)

    # Block 3
    x = Convolution2D(16, 1, 3, border_mode='same', name='block3_conv1')(x)
    # x = BatchNormalization(axis=1)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((1, 2), name='block3_pool')(x)

    # Block 4
    # x = Convolution2D(32, 1, 3, border_mode='same', name='block4_conv1')(x)
    # x = BatchNormalization(axis=1)(x)
    # x = Activation('relu')(x)
    # x = MaxPooling2D((1, 2), name='block4_pool')(x)

    x = Flatten(name='flatten')(x)
    x = Dropout(0.5)(x)
    x = Dense(64, W_regularizer=l2(0.01), name='fc1')(x)
    #x = BatchNormalization()(x)
    #x = Activation('relu')(x)
    #x = Dropout(0.5)(x)
    #x = Dense(1, name='predictions')(x)
    # x = BatchNormalization()(x)
    predictions = Activation('sigmoid')(x)

    # for layer in base_model.layers[:10]:
    #    layer.trainable = False
    model = Model(input=input_tensor, output=predictions)
    if weights is not None:
        model.load_weights(weights)

    return model
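A hedged usage sketch for ecog_1d_model, assuming Keras 1.x with the Theano dim_ordering so that the (1, channels, 1000) input is read as one channel over an electrodes x time grid, which the AveragePooling2D((1, 5)) pre-pool then decimates along the time axis; the channel count is illustrative:

# Illustrative call with 64 electrodes; weights are left uninitialized.
model = ecog_1d_model(channels=64)
model.compile(optimizer='adam', loss='binary_crossentropy')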
项目:CarND-Behavioral-Cloning    作者:dventimi    | 项目源码 | 文件源码
def CarND(input_shape, crop_shape):
    model = Sequential()

    # Crop
    # model.add(Cropping2D(((80,20),(1,1)), input_shape=input_shape, name="Crop"))
    model.add(Cropping2D(crop_shape, input_shape=input_shape, name="Crop"))

    # Resize
    model.add(AveragePooling2D(pool_size=(1,4), name="Resize", trainable=False))

    # Normalize input.
    model.add(BatchNormalization(axis=1, name="Normalize"))

    # Reduce dimensions through trainable convolution, activation, and
    # pooling layers.
    model.add(Convolution2D(24, 3, 3, subsample=(2,2), name="Convolution2D1", activation="relu"))
    model.add(MaxPooling2D(name="MaxPool1"))
    model.add(Convolution2D(36, 3, 3, subsample=(1,1), name="Convolution2D2", activation="relu"))
    model.add(MaxPooling2D(name="MaxPool2"))
    model.add(Convolution2D(48, 3, 3, subsample=(1,1), name="Convolution2D3", activation="relu"))
    model.add(MaxPooling2D(name="MaxPool3"))

    # Dropout for regularization
    model.add(Dropout(0.1, name="Dropout"))

    # Flatten input in a non-trainable layer before feeding into
    # fully-connected layers.
    model.add(Flatten(name="Flatten"))

    # Model steering through trainable layers comprising dense units
    # as well as dropout units for regularization.
    model.add(Dense(100, activation="relu", name="FC2"))
    model.add(Dense(50, activation="relu", name="FC3"))
    model.add(Dense(10, activation="relu", name="FC4"))

    # Generate output (steering angles) with a single non-trainable
    # node.
    model.add(Dense(1, name="Readout", trainable=False))
    return model

# #+RESULTS:

#       Here is a summary of the actual model, as generated directly by
#       =model.summary= in Keras.
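The generated summary itself is not reproduced in this extract. A hedged sketch of how the model might be instantiated before calling model.summary(), assuming Keras 1.x, a 160x320 RGB camera frame (an assumption; only the crop values come from the commented-out default above):

# Crop 80 rows off the top and 20 off the bottom, then let the non-trainable
# AveragePooling2D "Resize" layer shrink the width by a factor of 4.
model = CarND(input_shape=(160, 320, 3), crop_shape=((80, 20), (1, 1)))
model.compile(optimizer='adam', loss='mse')
model.summary()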
项目:Sacred_Deep_Learning    作者:AAbercrombie0492    | 项目源码 | 文件源码
def build_model(self):
        from resnet50 import identity_block, conv_block
        from keras.layers import Input
        from keras import layers
        from keras.layers import Dense
        from keras.layers import Activation
        from keras.layers import Flatten
        from keras.layers import Conv2D
        from keras.layers import MaxPooling2D
        from keras.layers import GlobalMaxPooling2D
        from keras.layers import ZeroPadding2D
        from keras.layers import AveragePooling2D
        from keras.layers import GlobalAveragePooling2D
        from keras.layers import BatchNormalization
        from keras.models import Model
        from keras.preprocessing import image
        import keras.backend as K
        from keras.utils import layer_utils

        inputs = Input(shape=self.X[0].shape)
        bn_axis = 3  # assuming channels_last image ordering
        x = ZeroPadding2D((3, 3))(inputs)
        x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
        x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

        x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

        x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

        x = AveragePooling2D((7, 7), name='avg_pool')(x)
        x = Flatten()(x)
        x = Dense(2, activation='softmax', name = 'fc1000')(x)

        self.model = Model(inputs, x, name='resnet50')
项目:Fabrik    作者:Cloud-CV    | 项目源码 | 文件源码
def pooling(layer, layer_in, layerId):
    poolMap = {
        ('1D', 'MAX'): MaxPooling1D,
        ('2D', 'MAX'): MaxPooling2D,
        ('3D', 'MAX'): MaxPooling3D,
        ('1D', 'AVE'): AveragePooling1D,
        ('2D', 'AVE'): AveragePooling2D,
        ('3D', 'AVE'): AveragePooling3D,
    }
    out = {}
    layer_type = layer['params']['layer_type']
    pool_type = layer['params']['pool']
    padding = get_padding(layer)
    if (layer_type == '1D'):
        strides = layer['params']['stride_w']
        kernel = layer['params']['kernel_w']
        if (padding == 'custom'):
            p_w = layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding1D(padding=p_w)(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    elif (layer_type == '2D'):
        strides = (layer['params']['stride_h'], layer['params']['stride_w'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'])
        if (padding == 'custom'):
            p_h, p_w = layer['params']['pad_h'], layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding2D(padding=(p_h, p_w))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    else:
        strides = (layer['params']['stride_h'], layer['params']['stride_w'],
                   layer['params']['stride_d'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'],
                  layer['params']['kernel_d'])
        if (padding == 'custom'):
            p_h, p_w, p_d = layer['params']['pad_h'], layer['params']['pad_w'],\
                            layer['params']['pad_d']
            out[layerId + 'Pad'] = ZeroPadding3D(padding=(p_h, p_w, p_d))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    out[layerId] = poolMap[(layer_type, pool_type)](pool_size=kernel, strides=strides, padding=padding)(
                                                    *layer_in)
    return out
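A hedged example of the layer description this helper expects, with field names inferred from the code above; `prev_tensor` stands for a Keras tensor produced by an earlier layer, and get_padding() is assumed to resolve the padding mode from the same params dict:

# Wires a 2x2 average pooling onto an existing tensor; names are illustrative.
layer = {'params': {'layer_type': '2D', 'pool': 'AVE',
                    'stride_h': 2, 'stride_w': 2,
                    'kernel_h': 2, 'kernel_w': 2}}
out = pooling(layer, [prev_tensor], 'pool1')   # out['pool1'] holds the AveragePooling2D output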


# ********** Locally-connected Layers **********