Python keras.layers.pooling module: MaxPooling2D() code examples

We extracted the following 30 code examples from open-source Python projects to illustrate how to use keras.layers.pooling.MaxPooling2D().

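As a quick orientation before the project snippets, here is a minimal, self-contained sketch of the layer's basic behavior. This is our own illustration, assuming Keras 2 with a TensorFlow backend and channels_last data, not code from any of the projects below.

import numpy as np
from keras.layers import Input
from keras.layers.pooling import MaxPooling2D
from keras.models import Model

# The default 2x2 window with a matching stride halves each spatial dimension.
inp = Input(shape=(32, 32, 3))
out = MaxPooling2D(pool_size=(2, 2))(inp)
model = Model(inputs=inp, outputs=out)

x = np.random.rand(1, 32, 32, 3).astype('float32')
print(model.predict(x).shape)  # (1, 16, 16, 3): spatial dims halved, channels untouched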
Project: DeepWorks | Author: daigo0927
def build_simpleCNN(input_shape = (32, 32, 3), num_output = 10):

    h, w, nch = input_shape
    assert h == w, 'expected a square input shape (h, w, nch) with h == w'

    images = Input(shape = (h, w, nch))
    x = Conv2D(64, (4, 4), strides = (1, 1),
               kernel_initializer = init, padding = 'same')(images)  # `init` is defined elsewhere in the module
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size = (2, 2))(x)
    x = Conv2D(128, (4, 4), strides = (1, 1),
               kernel_initializer = init, padding = 'same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size = (2, 2))(x)
    x = Flatten()(x)
    outputs = Dense(num_output, kernel_initializer = init,
                    activation = 'softmax')(x)

    model = Model(inputs = images, outputs = outputs)
    return model
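
A hedged usage sketch for the block above; `init` is a module-level initializer that the original file defines elsewhere, so it is stubbed here.

init = 'he_normal'  # assumption: the original module defines `init` itself

model = build_simpleCNN(input_shape=(32, 32, 3), num_output=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()  # the two 2x2 poolings take 32x32 down to 8x8 before Flatten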
Project: Gene-prediction | Author: sriram2093
def regionProposalNetwork(base_layers, noOfAnchors):
    """
    Region Proposal Network
    """
    x = Conv2D(512, (1, 300), padding='same', activation='relu', kernel_initializer='normal', name='rpn_conv1')(base_layers)
    print('INFO: rpn_conv1:', x)

    #x = Conv2D(512, (1, 302), padding='same', activation='relu', kernel_initializer='normal', name='rpn_conv2')(base_layers)
    #x = MaxPooling2D((1,2), strides = (1,2))(x)

    x_class = Conv2D(noOfAnchors, (1, 103), activation='sigmoid', kernel_initializer='uniform', name='rpn_out_class')(x)
    print('INFO: rpn_out_class:', x_class)
    x_regr = Conv2D(noOfAnchors * 4, (1, 103), activation='linear', kernel_initializer='zero', name='rpn_out_regress')(x)
    print('INFO: rpn_out_regress:', x_regr)
    return [x_class, x_regr, base_layers]
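
A hypothetical shape walk-through for this head, assuming 9 anchors and an illustrative (1, 600, 512) base feature map; the real shapes come from the project's pipeline.

from keras.layers import Input

base = Input(shape=(1, 600, 512))
x_class, x_regr, _ = regionProposalNetwork(base, noOfAnchors=9)
# x_class carries one sigmoid objectness score per anchor per position;
# x_regr carries four box-regression values per anchor per position.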
Project: eva-didi | Author: eljefec
def build_model(dropout):
    model = Sequential()
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape = INPUT_SHAPE))
    model.add(Conv2D(3, (1, 1), activation='relu'))
    model.add(Conv2D(12, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Conv2D(16, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Conv2D(24, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Conv2D(48, (3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dropout(dropout))
    model.add(Dense(64, activation = 'relu'))
    model.add(Dropout(dropout))
    model.add(Dense(32, activation = 'relu'))
    model.add(Dropout(dropout))
    model.add(Dense(1))

    return model
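
A hedged training setup for this model; INPUT_SHAPE is a module-level constant in the original file, so the value below is only illustrative.

INPUT_SHAPE = (66, 200, 3)  # assumption: the real constant lives elsewhere in the module

model = build_model(dropout=0.5)
model.compile(optimizer='adam', loss='mse')  # single-value regression head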
Project: eva-didi | Author: eljefec
def build_model(dropout_rate = 0.2):
    input_image = Input(shape = IMAGE_SHAPE,
                        dtype = 'float32',
                        name = INPUT_IMAGE)
    x = MaxPooling2D()(input_image)
    x = MaxPooling2D()(x)
    x = MaxPooling2D()(x)
    x = MaxPooling2D()(x)
    x = Dropout(dropout_rate)(x)
    x = Conv2D(32, kernel_size=3, strides=(2,2))(x)
    x = MaxPooling2D()(x)
    x = Conv2D(32, kernel_size=3, strides=(2,2))(x)
    x = MaxPooling2D()(x)
    x = Dropout(dropout_rate)(x)
    image_out = Flatten()(x)
    # image_out = Dense(32, activation='relu')(conv)

    input_lidar_panorama = Input(shape = PANORAMA_SHAPE,
                                 dtype = 'float32',
                                 name = INPUT_LIDAR_PANORAMA)
    x = pool_and_conv(input_lidar_panorama)
    x = pool_and_conv(x)
    x = Dropout(dropout_rate)(x)
    panorama_out = Flatten()(x)

    input_lidar_slices = Input(shape = SLICES_SHAPE,
                               dtype = 'float32',
                               name = INPUT_LIDAR_SLICES)
    x = MaxPooling3D(pool_size=(2,2,1))(input_lidar_slices)
    x = Conv3D(32, kernel_size=3, strides=(2,2,1))(x)
    x = MaxPooling3D(pool_size=(2,2,1))(x)
    x = Dropout(dropout_rate)(x)
    x = Conv3D(32, kernel_size=2, strides=(2,2,1))(x)
    x = MaxPooling3D(pool_size=(2,2,1))(x)
    x = Dropout(dropout_rate)(x)
    slices_out = Flatten()(x)

    x = keras.layers.concatenate([image_out, panorama_out, slices_out])

    x = Dense(32, activation='relu')(x)
    x = Dense(32, activation='relu')(x)
    x = Dense(32, activation='relu')(x)

    pose_output = Dense(9, name=OUTPUT_POSE)(x)

    model = Model(inputs=[input_image, input_lidar_panorama, input_lidar_slices],
                  outputs=[pose_output])

    # Fix error with TF and Keras
    import tensorflow as tf
    tf.python.control_flow_ops = tf

    model.compile(loss='mean_squared_error', optimizer='adam')

    return model
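
A hedged prediction sketch for the three-input model; IMAGE_SHAPE, PANORAMA_SHAPE and SLICES_SHAPE are project constants defined elsewhere, so they are assumed to be in scope.

import numpy as np

model = build_model(dropout_rate=0.2)
dummy = [np.zeros((1,) + IMAGE_SHAPE, dtype='float32'),
         np.zeros((1,) + PANORAMA_SHAPE, dtype='float32'),
         np.zeros((1,) + SLICES_SHAPE, dtype='float32')]
pose = model.predict(dummy)  # shape (1, 9): the nine-value pose output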
Project: Keras-ResNeXt | Author: titu1994
def __initial_conv_block_imagenet(input, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the inception resnext
    Args:
        input: input tensor
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(64, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
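
As a sanity check, the strided 7x7 convolution plus the strided 3x3 max-pool cut the spatial resolution by a factor of 4. A sketch of ours, assuming channels_last and an ImageNet-sized input:

from keras import backend as K
from keras.layers import Input

inp = Input(shape=(224, 224, 3))
x = __initial_conv_block_imagenet(inp)
print(K.int_shape(x))  # (None, 56, 56, 64): two stride-2 stages, 224 / 4 = 56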
Project: Flower_Recognition_CNN | Author: Labyrinth108
def model_config(size):
    model = Sequential()

    model.add(Conv2D(32, (5, 5), padding='valid', input_shape=(size, size, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(128, (3, 3), padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64, kernel_initializer='he_normal', bias_initializer='zeros'))
    model.add(Activation('tanh'))

    # Softmax output over the module-level label_size classes
    model.add(Dense(label_size, kernel_initializer='he_normal', bias_initializer='zeros'))
    model.add(Activation('softmax'))

    return model
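
A hedged usage sketch; `label_size` is a module-level variable in the original project, so it is stubbed here.

label_size = 5  # assumption: the original module derives this from its dataset

model = model_config(size=64)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])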
Project: DeepJet | Author: mstoye
def Serious_gluon_model(Inputs, nclasses, dropoutRate=-1):
    # note: Keras LocallyConnected2D supports only 'valid' padding
    x = LocallyConnected2D(64, (8, 8), strides=(4, 4), border_mode='valid',
                           activation='relu', kernel_initializer='lecun_uniform')(Inputs[1])
    # x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Convolution2D(64, (4, 4), 1, border_mode='same', activation='relu',
                      kernel_initializer='lecun_uniform')(x)
    # x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Convolution2D(64, (4, 4), 1, border_mode='same', activation='relu',
                      kernel_initializer='lecun_uniform')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten()(x)
    x = merge([x, Inputs[0]], mode='concat')
    # linear activation for regression and softmax for classification
    x = Dense(128, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dense(128, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dense(64, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = Dense(64, activation='relu', kernel_initializer='lecun_uniform')(x)

    predictions = [Dense(2, activation='linear', kernel_initializer='normal')(x),
                   Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform')(x)]
    model = Model(inputs=Inputs, outputs=predictions)
    return model
Project: DeepJet | Author: mstoye
def base_model(input_shapes):
        from keras.layers import Input
        from keras.layers.core import Masking
        x_global  = Input(shape=input_shapes[0])
        x_map = Input(shape=input_shapes[1])
        x_ptreco  = Input(shape=input_shapes[2])

        x = Convolution2D(64, (8, 8), border_mode='same', activation='relu',
                          kernel_initializer='lecun_uniform')(x_map)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Convolution2D(64, (4, 4), border_mode='same', activation='relu',
                          kernel_initializer='lecun_uniform')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Convolution2D(64, (4, 4), border_mode='same', activation='relu',
                          kernel_initializer='lecun_uniform')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Flatten()(x)
        x = merge( [x, x_global] , mode='concat')
        # linear activation for regression and softmax for classification
        x = Dense(128, activation='relu',kernel_initializer='lecun_uniform')(x)
        x = merge([x, x_ptreco], mode='concat')
        return [x_global, x_map, x_ptreco], x
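
A hedged sketch of how the returned input list and feature tensor are meant to be closed into a full Model downstream; input_shapes and nclasses come from the caller.

inputs, x = base_model(input_shapes)
predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform')(x)
model = Model(inputs=inputs, outputs=predictions)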
Project: DeepWorks | Author: daigo0927
def build(input_shape, num_outputs,
              block_fn, repetitions):

        inputs = Input(shape = input_shape)
        conv1 = Conv2D(64, (7, 7), strides = (2, 2),
                       padding = 'same')(inputs)
        conv1 = BatchNormalization()(conv1)
        conv1 = Activation('relu')(conv1)
        pool1 = MaxPooling2D(pool_size = (3, 3), strides = (2, 2),
                            padding = 'same')(conv1)

        x = pool1
        filters = 64
        first_layer = True
        for i, r in enumerate(repetitions):
            x = _residual_block(block_fn, filters = filters,
                                repetitions = r, is_first_layer = first_layer)(x)
            filters *= 2
            if first_layer:
                first_layer = False

        # last activation <- unnecessary???
        # x = BatchNormalization()(x)
        # x = Activation('relu')(x)

        _, w, h, ch = K.int_shape(x)
        pool2 = AveragePooling2D(pool_size = (w, h), strides = (1, 1))(x)
        flat1 = Flatten()(pool2)
        outputs = Dense(num_outputs, kernel_initializer = init,
                        activation = 'softmax')(flat1)

        model = Model(inputs = inputs, outputs = outputs)
        return model
Project: enet-keras | Author: PavlosMelissinos
def initial_block(inp, nb_filter=13, nb_row=3, nb_col=3, strides=(2, 2)):
    conv = Conv2D(nb_filter, (nb_row, nb_col), padding='same', strides=strides)(inp)
    max_pool = MaxPooling2D()(inp)
    merged = concatenate([conv, max_pool], axis=3)
    return merged
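
This follows the ENet initial block: a strided convolution with 13 filters runs in parallel with a 2x2 max-pool of the 3-channel input, and the two are concatenated into 16 maps at half resolution. A quick hedged check (channels_last assumed):

from keras import backend as K
from keras.layers import Input

inp = Input(shape=(512, 512, 3))
merged = initial_block(inp)
print(K.int_shape(merged))  # (None, 256, 256, 16): 13 conv maps + 3 pooled input channels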
Project: ai-bs-summer17 | Author: uchibe
def createModel(self):

        model = Sequential()
        model.add(Conv2D(16, (3, 3), strides=(2, 2), input_shape=(self.img_rows, self.img_cols, self.img_channels)))
        model.add(Activation('relu'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Conv2D(16, (3, 3), strides=(2, 2)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation('relu'))
        # model.add(Dropout(0.5))
        model.add(Dense(self.output_size))
        # model.add(Activation('softmax'))
        # model.compile(RMSprop(lr=self.learningRate), 'MSE')
        # sgd = SGD(lr=self.learningRate)
        adam = Adam(lr=self.learningRate)
        model.compile(loss='mse', optimizer=adam)
        model.summary()

        return model
Project: keras-yolo | Author: BrainsGarden
def get_maxpool(params):
    return MaxPooling2D(
        strides=params.get('stride', 1), 
        pool_size=params.get('size', 1), 
        padding="same")
Project: sdp | Author: tansey
def create_model(model, x_shape, y_shape, variable_scope='pixels-', dimsize=256, **kwargs):
    with tf.variable_scope(variable_scope):
        X_image = tf.placeholder(tf.float32, [None] + list(x_shape[1:]), name='X')
        conv1 = Convolution2D(32, 3, 3, border_mode='same', activation=K.relu, W_regularizer=l2(0.01),
                                        input_shape=x_shape[1:])(X_image)
        pool1 = MaxPooling2D(pool_size=(2,2), border_mode='same')(conv1)
        drop1 = Dropout(0.5)(pool1)
        conv2 = Convolution2D(64, 5, 5, border_mode='same', activation=K.relu, W_regularizer=l2(0.01))(drop1)
        pool2 = MaxPooling2D(pool_size=(2,2), border_mode='same')(conv2)
        drop2 = Dropout(0.5)(pool2)
        drop2_flat = tf.reshape(drop2, [-1, 3*3*64])
        hidden1 = Dense(1024, W_regularizer=l2(0.01), activation=K.relu)(drop2_flat)
        drop_h1 = Dropout(0.5)(hidden1)
        hidden2 = Dense(128, W_regularizer=l2(0.01), activation=K.relu)(drop_h1)
        drop_h2 = Dropout(0.5)(hidden2)
        hidden3 = Dense(32, W_regularizer=l2(0.01), activation=K.relu)(drop_h2)
        drop_h3 = Dropout(0.5)(hidden3)

        num_classes = tuple([dimsize]*y_shape[1])
        print(num_classes)
        if model == 'multinomial':
            dist_model = MultinomialLayer(drop_h3, 32, num_classes, **kwargs)
        elif model == 'gmm':
            dist_model = DiscreteParametricMixtureLayer(drop_h3, 32, num_classes, **kwargs)
        elif model == 'lmm':
            dist_model = DiscreteLogisticMixtureLayer(drop_h3, 32, num_classes, **kwargs)
        elif model == 'sdp':
            dist_model = LocallySmoothedMultiscaleLayer(drop_h3, 32, num_classes, **kwargs)
        else:
            raise Exception('Unknown model type: {0}'.format(model))

        return X_image, dist_model
Project: sdp | Author: tansey
def neural_network(self, X):
    """pi, mu, sigma = NN(x; theta)"""
    X_image = tf.reshape(X, [-1,IMAGE_ROWS,IMAGE_COLS,1])
    conv1 = Convolution2D(32, 5, 5, border_mode='same', activation=K.relu, W_regularizer=l2(0.01),
                          input_shape=(IMAGE_ROWS, IMAGE_COLS, 1))(X_image)
    pool1 = MaxPooling2D(pool_size=(2,2), border_mode='same')(conv1)
    conv2 = Convolution2D(64, 5, 5, border_mode='same', activation=K.relu, W_regularizer=l2(0.01))(pool1)
    pool2 = MaxPooling2D(pool_size=(2,2), border_mode='same')(conv2)
    pool2_flat = tf.reshape(pool2, [-1, IMAGE_ROWS//4 * IMAGE_COLS//4 * 64])
    hidden1 = Dense(1024, W_regularizer=l2(0.01), activation=K.relu)(pool2_flat)
    hidden2 = Dense(64, W_regularizer=l2(0.01), activation=K.relu)(hidden1)
    self.mus = Dense(self.K)(hidden2)
    self.sigmas = Dense(self.K, activation=K.softplus)(hidden2)
    self.pi = Dense(self.K, activation=K.softmax)(hidden2)
Project: eva-didi | Author: eljefec
def pool_and_conv(x):
    x = MaxPooling2D()(x)
    x = Conv2D(32, kernel_size=3, strides=(2,2))(x)
    return x
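
Each pool_and_conv call shrinks the grid roughly fourfold: the max-pool halves it and the stride-2 valid convolution nearly halves it again. A hedged illustration:

from keras import backend as K
from keras.layers import Input

inp = Input(shape=(64, 64, 3))
x = pool_and_conv(inp)
print(K.int_shape(x))  # (None, 15, 15, 32): 64 -> 32 (pool) -> 15 (k=3, s=2, valid)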
Project: AdversarialMachineLearning_COMP551 | Author: arunrawlani
def vgg19(input_shape):
    base_model = VGG19(weights='imagenet', include_top=False, input_shape=input_shape)

    # add a global spatial average pooling layer
    x = base_model.output
    x = MaxPooling2D()(x)
    # let's add a fully-connected layer
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    #x = Dense(512, activation='relu')(x)
    #x = Dropout(0.5)(x)
    # and a logistic layer -- let's say we have 200 classes
    predictions = Dense(10, activation='softmax')(x)

    # this is the model we will train
    model = Model(input=base_model.input, output=predictions)

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in base_model.layers:
        layer.trainable = False

    # compile the model (should be done *after* setting layers to non-trainable)
    # model.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])

    return model
    #return predictions
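
A hedged fine-tuning sketch for the head above; with a 224x224 input, VGG19's 7x7x512 output is pooled to 3x3x512 before the dense layers.

model = vgg19(input_shape=(224, 224, 3))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])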
Project: AdversarialMachineLearning_COMP551 | Author: arunrawlani
def vgg19(input_shape):
    base_model = VGG19(weights='imagenet', include_top=False, input_shape=input_shape)

    # add a global spatial average pooling layer
    x = base_model.output
    x = MaxPooling2D()(x)
    # let's add a fully-connected layer
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    #x = Dense(512, activation='relu')(x)
    #x = Dropout(0.5)(x)
    # and a logistic layer -- let's say we have 200 classes
    predictions = Dense(10, activation='softmax')(x)

    # this is the model we will train
    model = Model(input=base_model.input, output=predictions)

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in base_model.layers:
        layer.trainable = False

    # compile the model (should be done *after* setting layers to non-trainable)
    # model.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])

    return model
#return predictions
Project: gym-unrealcv | Author: zfw1226
def createModel(self):
        input_shape = (self.img_channels, self.img_rows, self.img_cols)
        if K.image_dim_ordering() == 'tf':
            input_shape = ( self.img_rows, self.img_cols, self.img_channels)

        model = Sequential()
        model.add(Convolution2D(16, 3, 3,border_mode='same', input_shape = input_shape))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Convolution2D(32, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))


        model.add(Convolution2D(64, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())


        model.add(Dense(256))
        model.add(Activation('relu'))
        model.add(Dropout(0.25))
        model.add(Dense(256))
        model.add(Activation('relu'))
        model.add(Dropout(0.25))
        model.add(Dense(self.output_size,activation='linear'))
        model.compile(Adam(lr=self.learningRate), 'MSE')
        model.summary()


        return model
Project: DeepJet | Author: mstoye
def block_SchwartzImage(image,dropoutRate,active=True):
    '''
    returns flattened output
    '''

    if active:
        image =   Convolution2D(64, (8,8)  , border_mode='same', activation='relu',
                                kernel_initializer='lecun_uniform', name='swz_conv0')(image)
        image = MaxPooling2D(pool_size=(2, 2), name='swz_maxpool0')(image)
        image = Dropout(dropoutRate)(image)
        image =   Convolution2D(64, (4,4) , border_mode='same', activation='relu',
                                kernel_initializer='lecun_uniform', name='swz_conv1')(image)
        image = MaxPooling2D(pool_size=(2, 2), name='swz_maxpool1')(image)
        image = Dropout(dropoutRate)(image)
        image =   Convolution2D(64, (4,4)  , border_mode='same', activation='relu',
                                kernel_initializer='lecun_uniform', name='swz_conv2')(image)
        image = MaxPooling2D(pool_size=(2, 2), name='swz_maxpool2')(image)
        image = Dropout(dropoutRate)(image)
        image = Flatten()(image)

    else:
        #image=Cropping2D(crop)(image)#cut almost all of the 20x20 pixels
        image = Flatten()(image)
        image = Dense(1,kernel_initializer='zeros',trainable=False, name='swz_conv_off')(image)#effectively multipy by 0

    return image
Project: DeepJet | Author: mstoye
def Schwartz_gluon_model(Inputs, nclasses, dropoutRate=-1):
    x = Convolution2D(64, (8, 8), border_mode='same', activation='relu',
                      kernel_initializer='lecun_uniform')(Inputs[1])
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Convolution2D(64, (4, 4), border_mode='same', activation='relu',
                      kernel_initializer='lecun_uniform')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Convolution2D(64, (4, 4), border_mode='same', activation='relu',
                      kernel_initializer='lecun_uniform')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten()(x)
    x = merge([x, Inputs[0]], mode='concat')  # flat global features, not the 2D image input
    # linear activation for regression and softmax for classification
    x = Dense(128, activation='relu', kernel_initializer='lecun_uniform')(x)
    predictions = [Dense(2, activation='linear', kernel_initializer='normal')(x),
                   Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform')(x)]
    model = Model(inputs=Inputs, outputs=predictions)
    return model
Project: enet-keras | Author: PavlosMelissinos
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                            # padding='same',
                            strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise Exception("You shouldn't be here")

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other = MaxPooling2D()(other)

        # zero-pad the channel axis: swap channels onto the column axis,
        # pad there, then swap back (see the standalone sketch after this function)
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    return encoder
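
The Permute/ZeroPadding2D pair in the downsample branch works around the fact that ZeroPadding2D cannot touch the channel axis directly. The standalone sketch below isolates that channel-padding idea; it is our illustration, channels_last assumed.

from keras.layers import Input, Permute, ZeroPadding2D
from keras.models import Model

inp = Input(shape=(8, 8, 3))                      # 3 channels, to be padded up to 16
x = Permute((1, 3, 2))(inp)                       # (8, 3, 8): channels sit on the column axis
x = ZeroPadding2D(padding=((0, 0), (0, 13)))(x)   # append 13 all-zero "columns"
x = Permute((1, 3, 2))(x)                         # back to (8, 8, 16)
print(Model(inp, x).output_shape)                 # (None, 8, 8, 16)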
Project: keras-contrib | Author: farizrahman4u
def __create_wide_residual_network(nb_classes, img_input, include_top, depth=28, width=8, dropout=0.0):
    ''' Creates a Wide Residual Network with specified parameters

    Args:
        nb_classes: Number of output classes
        img_input: Input tensor or layer
        include_top: Flag to include the last dense layer
        depth: Depth of the network. Compute N = (depth - 4) / 6.
               For a depth of 16, N = (16 - 4) / 6 = 2
               For a depth of 28, N = (28 - 4) / 6 = 4
               For a depth of 40, N = (40 - 4) / 6 = 6
        width: Width of the network.
        dropout: Adds dropout if value is greater than 0.0

    Returns: the output Keras tensor (wrap it in a Model together with img_input)
    '''

    N = (depth - 4) // 6

    x = __conv1_block(img_input)
    nb_conv = 4

    for i in range(N):
        x = __conv2_block(x, width, dropout)
        nb_conv += 2

    x = MaxPooling2D((2, 2))(x)

    for i in range(N):
        x = __conv3_block(x, width, dropout)
        nb_conv += 2

    x = MaxPooling2D((2, 2))(x)

    for i in range(N):
        x = ___conv4_block(x, width, dropout)
        nb_conv += 2

    x = AveragePooling2D((8, 8))(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(nb_classes, activation='softmax')(x)

    return x
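
A hedged end-to-end sketch; the __conv*_block helpers live in the same keras-contrib module, and WRN-28-8 on a CIFAR-sized input gives N = (28 - 4) // 6 = 4 blocks per stage.

from keras.layers import Input
from keras.models import Model

img_input = Input(shape=(32, 32, 3))
output = __create_wide_residual_network(10, img_input, include_top=True,
                                        depth=28, width=8, dropout=0.0)
model = Model(img_input, output)  # the function returns a tensor, not a Model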
Project: object-detection-with-deep-learning | Author: neerajdixit
def get_model():
    """
        Defines the CNN model architecture and returns the model.
        The architecture is the same as the one I developed for project 2,
        https://github.com/neerajdixit/Traffic-Sign-classifier-with-Deep-Learning
        with an additional normalization layer in front and
        a final fully connected layer of size 5, since we have 5 different types of objects in our data set.
    """

    # Create a Keras sequential model
    model = Sequential()
    #model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(160,320,3)))
    # Add a normalization layer to normalize between -0.5 and 0.5.
    model.add(Lambda(lambda x: x / 255. - .5,input_shape=(im_x,im_y,im_z), name='norm'))
    # Add a convolution layer with Input = 32x32x3. Output = 30x30x6. Strides 1 and VALID padding.
    # Perform RELU activation 
    model.add(Convolution2D(6, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu', name='conv1'))
    # Add a convolution layer with Input = 30x30x6. Output = 28x28x9. Strides 1 and VALID padding.
    # Perform RELU activation 
    model.add(Convolution2D(9, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu', name='conv2'))
    # Add Pooling layer with Input = 28x28x9. Output = 14x14x9. 2x2 kernel, Strides 2 and VALID padding
    model.add(MaxPooling2D(pool_size=(2, 2), border_mode='valid', name='pool1'))
    # Add a convolution layer with Input 14x14x9. Output = 12x12x12. Strides 1 and VALID padding.
    # Perform RELU activation 
    model.add(Convolution2D(12, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu', name='conv3'))
    # Add a convolution layer with Input = 12x12x12. Output = 10x10x16. Strides 1 and VALID padding.
    # Perform RELU activation 
    model.add(Convolution2D(16, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu', name='conv4'))
    # Add Pooling layer with Input = 10x10x16. Output = 5x5x16. 2x2 kernel, Strides 2 and VALID padding
    model.add(MaxPooling2D(pool_size=(2, 2), border_mode='valid', name='pool2'))
    # Flatten. Input = 5x5x16. Output = 400.
    model.add(Flatten(name='flat1'))
    # Add dropout layer with 0.2  
    model.add(Dropout(0.2, name='dropout1'))
    # Add Fully Connected layer. Input = 400. Output = 220
    # Perform RELU activation 
    model.add(Dense(220, activation='relu', name='fc1'))
    # Add Fully Connected layer. Input = 220. Output = 43
    # Perform RELU activation 
    model.add(Dense(43, activation='relu', name='fc2'))
    # Add Fully Connected layer. Input = 43. Output = 5
    # Perform RELU activation 
    model.add(Dense(5, name='fc3'))
    # Configure the model for training with Adam optimizer
    # "mean squared error" loss objective and accuracy metrics
    # A learning rate of 0.001 was chosen because it gave the best performance among the values tested
    model.compile(optimizer=Adam(lr=0.001), loss="mse", metrics=['accuracy'])
    return model
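
A hedged instantiation; im_x, im_y and im_z are module-level globals in the original project, so they are stubbed here.

im_x, im_y, im_z = 32, 32, 3  # assumption: set by the project's data pipeline

model = get_model()
model.summary()  # flattened size 400 matches the 5x5x16 'pool2' output above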
Project: drcn | Author: ghif
def create_convnet(self, _input, dense_dim=1000, dy=10, nb_filters=[64, 128], kernel_size=(3, 3), pool_size=(2, 2), 
        dropout=0.5, bn=True, output_activation='softmax', opt='adam'):

        """
        Create convnet model / encoder of DRCN

        Args:
            _input (Tensor)            : input layer
            dense_dim (int)            : dimensionality of the final dense layers
            dy (int)                   : output dimensionality
            nb_filters (list)          : list of #Conv2D filters
            kernel_size (tuple)        : Conv2D kernel size
            pool_size (tuple)          : MaxPool kernel size
            dropout (float)            : dropout rate
            bn (boolean)               : batch normalization mode
            output_activation (string) : act. function for output layer
            opt (string)               : optimizer

        Store the shared layers in the self.enc_functions list.
        """

        _h = _input

        self.enc_functions = [] # to store the shared layers, will be used later for constructing conv. autoencoder
        for i, nf in enumerate(nb_filters):
            enc_f = Conv2D(nf, kernel_size, padding='same')
            _h = enc_f(_h)
            self.enc_functions.append(enc_f)

            _h = Activation('relu')(_h)

            if i < 2:  # max-pool only after the first two conv blocks
                _h = MaxPooling2D(pool_size=pool_size, padding='same')(_h)

        _h = Flatten()(_h)      

        enc_f = Dense(dense_dim)
        _h = enc_f(_h) 
        self.enc_functions.append(enc_f)
        if bn:
            _h = BatchNormalization()(_h)
        _h = Activation('relu')(_h)
        _h = Dropout(dropout)(_h)

        enc_f = Dense(dense_dim)
        _h = enc_f(_h)
        self.enc_functions.append(enc_f)
        if bn:
            _h = BatchNormalization()(_h)
        _feat = Activation('relu')(_h)
        _h = Dropout(dropout)(_feat)

        _y = Dense(dy, activation=output_activation)(_h)

        # convnet
        self.convnet_model = Model(input=_input, output=_y)
        self.convnet_model.compile(loss='categorical_crossentropy', optimizer=opt)
        print(self.convnet_model.summary())

        self.feat_model = Model(input=_input, output=_feat)
Project: Keras-GAN-Animeface-Character | Author: forcecore
def build_discriminator( shape, build_disc=True ) :
    '''
    Build discriminator.
    Set build_disc=False to build an encoder network to test
    the encoding/discrimination capability with autoencoder...
    '''
    def conv2d( x, filters, shape=(4, 4), **kwargs ) :
        '''
        I don't want to write the lengthy parameters every time, so I made a shorthand function.
        '''
        x = Conv2D( filters, shape, strides=(2, 2),
            padding='same',
            kernel_initializer=Args.kernel_initializer,
            **kwargs )( x )
        #x = MaxPooling2D()( x )
        x = BatchNormalization(momentum=Args.bn_momentum)( x )
        x = LeakyReLU(alpha=Args.alpha_D)( x )
        return x

    # https://github.com/tdrussell/IllustrationGAN
    # As they propose, and unlike the usual GAN hacks, MaxPooling seems to work better for the anime dataset.
    # However, animeGAN doesn't use it, so I'll keep this closer to DCGAN.

    face = Input( shape=shape )
    x = face

    # Warning: Don't batchnorm the first set of Conv2D.
    x = Conv2D( 64, (4, 4), strides=(2, 2),
        padding='same',
        kernel_initializer=Args.kernel_initializer )( x )
    x = LeakyReLU(alpha=Args.alpha_D)( x )
    # 32x32

    x = conv2d( x, 128 )
    # 16x16

    x = conv2d( x, 256 )
    # 8x8

    x = conv2d( x, 512 )
    # 4x4

    if build_disc:
        x = Flatten()(x)
        # add 16 features. Run 1D conv of size 3.
        #x = MinibatchDiscrimination(16, 3)( x )

        #x = Dense(1024, kernel_initializer=Args.kernel_initializer)( x )
        #x = LeakyReLU(alpha=Args.alpha_D)( x )

        # 1 when "real", 0 when "fake".
        x = Dense(1, activation='sigmoid',
            kernel_initializer=Args.kernel_initializer)( x )
        return models.Model( inputs=face, outputs=x )
    else:
        # build encoder.
        x = Conv2D(Args.noise_shape[2], (4, 4), activation='tanh')(x)
        return models.Model( inputs=face, outputs=x )
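
A hedged construction example; Args is the project's global configuration object (kernel_initializer, bn_momentum, alpha_D, noise_shape), so it must be importable before this runs.

disc = build_discriminator(shape=(64, 64, 3), build_disc=True)
disc.compile(optimizer='adam', loss='binary_crossentropy')  # real-vs-fake on 64x64 faces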
Project: leap-scd | Author: smittal6
def seq(x_train,y_train,x_val,y_val,x_test,y_test):
        #Defining the structure of the neural network
        #Creating a Network, with 2 Convolutional layers
        model=Sequential()
        # model.add(Conv2D(128,(3,5),activation='relu',input_shape=(1,39,40)))
        # model.add(Conv2D(64,(3,5)))
        # model.add(MaxPooling2D((2,2)))
        # model.add(Flatten())
        model.add(Dense(512,activation='relu',input_shape=(780,)))
        model.add(Dense(512,activation='relu')) #Fully connected layer 1
        # model.add(Dropout(0.5))
        model.add(Dense(2,activation='softmax')) #Output Layer
        model.summary()
        # f=open('/home/siddharthm/scd/scores/'+common_save+'-complete.txt','rb+')
        # print f >> model.summary()
        data_saver(str(model.to_json()))
        # f.close()
        sgd=SGD(lr=0.1)
        early_stopping=EarlyStopping(monitor='val_loss',patience=4)
        reduce_lr=ReduceLROnPlateau(monitor='val_loss',patience=4,factor=0.5,min_lr=0.0000001)
        #Compilation region: Define optimizer, cost function, and the metric?
        model.compile(optimizer=sgd,loss='binary_crossentropy',metrics=['accuracy'])

        #Fitting region:Get to fit the model, with training data
        checkpointer=ModelCheckpoint(filepath=direc+common_save+'.json',monitor='val_acc',save_best_only=True,save_weights_only=True)

        #Doing the training[fitting]
        model.fit(x_train,y_train,epochs=EPOCH,batch_size=batch,validation_data=(x_val,y_val),callbacks=[checkpointer,early_stopping,reduce_lr])
        model.save_weights(direc+common_save+'-weights'+'.json') #Saving the weights from the model
        model.save(direc+common_save+'-model'+'.json')#Saving the model as is in its state

        ### SAVING THE VALIDATION DATA ###
        scores=model.predict(x_val,batch_size=batch)
        sio.savemat(direc+name_val+'.mat',{'scores':scores,'ytest':y_val}) #These are the validation scores.
        classes=model.predict_classes(x_train,batch_size=batch)
        ### ------------- ###

        ### SAVING THE TESTING DATA ###
        #scores_test=model.predict(x_test,batch_size=batch)
        #sio.savemat(direc+name_test+'.mat',{'scores':scores_test,'ytest':y_test})
        ### ------------- ###
        # print model.evaluate(x_test,y_test,batch_size=batch)

        #predictions=model.predict(x_val,batch_size=batch)
        #print "Shape of predictions: ", predictions.shape
        #print "Shape of y_test: ",y_test.shape
        return classes

#Non-function section

#y_test,predictions,classes=seq(x_train,y_train,x_val,y_val,x_test,y_test) #Calling the seq model, with 2 hidden layers
Project: leap-scd | Author: smittal6
def seq(x_train,y_train,x_val,y_val,x_test,y_test):
        #Defining the structure of the neural network
        #Creating a Network, with 2 Convolutional layers
        model=Sequential()
        # model.add(Conv2D(128,(3,5),activation='relu',input_shape=(1,39,40)))
        # model.add(Conv2D(64,(3,5)))
        # model.add(MaxPooling2D((2,2)))
        # model.add(Flatten())
        model.add(Dense(256,activation='relu',input_shape=(3904,)))
        model.add(Dense(512,activation='relu')) #Fully connected layer 1
        model.add(Dropout(0.25))
        model.add(Dense(512,activation='relu')) #Fully connected layer 2
        model.add(Dropout(0.25))
        model.add(Dense(2,activation='softmax')) #Output Layer
        model.summary()
        # f=open('/home/siddharthm/scd/scores/'+common_save+'-complete.txt','rb+')
        # print f >> model.summary()
        data_saver("##### -------- #####")
        data_saver(str(model.to_json()))
        # f.close()
        sgd=SGD(lr=1)
        early_stopping=EarlyStopping(monitor='val_loss',patience=6)
        reduce_lr=ReduceLROnPlateau(monitor='val_loss',patience=4,factor=0.5,min_lr=0.0000001)
        #Compilation region: Define optimizer, cost function, and the metric?
        model.compile(optimizer=sgd,loss='binary_crossentropy',metrics=['accuracy'])

        #Fitting region:Get to fit the model, with training data
        checkpointer=ModelCheckpoint(filepath=direc+common_save+'.json',monitor='val_acc',save_best_only=True,save_weights_only=True)

        #Doing the training[fitting]
        model.fit(x_train,y_train,epochs=EPOCH,batch_size=batch,validation_data=(x_val,y_val),callbacks=[checkpointer,early_stopping,reduce_lr])
        model.save_weights(direc+common_save+'-weights'+'.json') #Saving the weights from the model
        model.save(direc+common_save+'-model'+'.json')#Saving the model as is in its state

        ### SAVING THE VALIDATION DATA ###
        scores=model.predict(x_val,batch_size=batch)
        sio.savemat(direc+name_val+'.mat',{'scores':scores,'ytest':y_val}) #These are the validation scores.
        classes=model.predict_classes(x_val,batch_size=batch)
        ### ------------- ###

        ### SAVING THE TESTING DATA ###
        #scores_test=model.predict(x_test,batch_size=batch)
        #sio.savemat(direc+name_test+'.mat',{'scores':scores_test,'ytest':y_test})
        ### ------------- ###
        # print model.evaluate(x_test,y_test,batch_size=batch)

        #predictions=model.predict(x_val,batch_size=batch)
        #print "Shape of predictions: ", predictions.shape
        print "Training 0 class: ",len(np.where(y_train[:,0]==1)[0])
        print "Training 1 class: ",len(np.where(y_train[:,1]==1)[0])
        return classes

#Non-function section

#y_test,predictions,classes=seq(x_train,y_train,x_val,y_val,x_test,y_test) #Calling the seq model, with 2 hidden layers
Project: leap-scd | Author: smittal6
def seq(x_train,y_train,x_val,y_val,x_test,y_test):
        #Defining the structure of the neural network
        #Creating a Network, with 2 Convolutional layers
        model=Sequential()
        model.add(Conv2D(128,(2,5),activation='relu',input_shape=(1,39,20)))
        model.add(Conv2D(128,(2,3)))
        model.add(Conv2D(64,(2,3)))
        model.add(MaxPooling2D((2,2)))
        model.add(Flatten())
        model.add(Dense(1024,activation='relu')) #Fully connected layer 1
        model.add(Dropout(0.5))
        model.add(Dense(2,activation='softmax')) #Output Layer
        model.summary()
        # f=open('/home/siddharthm/scd/scores/'+common_save+'-complete.txt','rb+')
        # print f >> model.summary()
        data_saver(str(model.to_json()))
        # f.close()
        sgd=SGD(lr=0.1)
        early_stopping=EarlyStopping(monitor='val_loss',patience=4)
        reduce_lr=ReduceLROnPlateau(monitor='val_loss',patience=4,min_lr=0.0000001)
        #Compilation region: Define optimizer, cost function, and the metric?
        model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])

        #Fitting region:Get to fit the model, with training data
        checkpointer=ModelCheckpoint(filepath=direc+common_save+'.json',monitor='val_acc',save_best_only=True,save_weights_only=True)

        #Doing the training[fitting]
        model.fit(x_train,y_train,epochs=EPOCH,batch_size=batch,validation_data=(x_val,y_val),callbacks=[checkpointer,early_stopping,reduce_lr])
        model.save_weights(direc+common_save+'-weights'+'.json') #Saving the weights from the model
        model.save(direc+common_save+'-model'+'.json')#Saving the model as is in its state

        ### SAVING THE VALIDATION DATA ###
        scores=model.predict(x_val,batch_size=batch)
        sio.savemat(direc+name_val+'.mat',{'scores':scores,'ytest':y_val}) #These are the validation scores.
        classes=model.predict_classes(x_train,batch_size=batch)
        ### ------------- ###

        ### SAVING THE TESTING DATA ###
        #scores_test=model.predict(x_test,batch_size=batch)
        #sio.savemat(direc+name_test+'.mat',{'scores':scores_test,'ytest':y_test})
        ### ------------- ###
        # print model.evaluate(x_test,y_test,batch_size=batch)

        #predictions=model.predict(x_val,batch_size=batch)
        #print "Shape of predictions: ", predictions.shape
        #print "Shape of y_test: ",y_test.shape
        return classes

#Non-function section

#y_test,predictions,classes=seq(x_train,y_train,x_val,y_val,x_test,y_test) #Calling the seq model, with 2 hidden layers
Project: leap-scd | Author: smittal6
def seq(x_train,y_train,x_val,y_val,x_test,y_test):
        #Defining the structure of the neural network
        #Creating a Network, with 2 Convolutional layers
        model=Sequential()
        # model.add(Conv2D(128,(3,5),activation='relu',input_shape=(1,39,40)))
        # model.add(Conv2D(64,(3,5)))
        # model.add(MaxPooling2D((2,2)))
        # model.add(Flatten())
        model.add(Dense(256,activation='relu',input_shape=(5184,)))
        model.add(Dense(512,activation='relu')) #Fully connected layer 1
        # model.add(Dropout(0.5))
        model.add(Dense(512,activation='relu')) #Fully connected layer 2
        model.add(Dropout(0.5))
        model.add(Dense(2,activation='softmax')) #Output Layer
        model.summary()
        # f=open('/home/siddharthm/scd/scores/'+common_save+'-complete.txt','rb+')
        # print f >> model.summary()
        data_saver("##### -------- #####")
        data_saver(str(model.to_json()))
        # f.close()
        sgd=SGD(lr=1)
        early_stopping=EarlyStopping(monitor='val_loss',patience=6)
        reduce_lr=ReduceLROnPlateau(monitor='val_loss',patience=4,factor=0.5,min_lr=0.0000001)
        #Compilation region: Define optimizer, cost function, and the metric?
        model.compile(optimizer=sgd,loss='binary_crossentropy',metrics=['accuracy'])

        #Fitting region:Get to fit the model, with training data
        checkpointer=ModelCheckpoint(filepath=direc+common_save+'.json',monitor='val_acc',save_best_only=True,save_weights_only=True)

        #Doing the training[fitting]
        model.fit(x_train,y_train,epochs=EPOCH,batch_size=batch,validation_data=(x_val,y_val),callbacks=[checkpointer,early_stopping,reduce_lr])
        model.save_weights(direc+common_save+'-weights'+'.json') #Saving the weights from the model
        model.save(direc+common_save+'-model'+'.json')#Saving the model as is in its state

        ### SAVING THE VALIDATION DATA ###
        scores=model.predict(x_val,batch_size=batch)
        sio.savemat(direc+name_val+'.mat',{'scores':scores,'ytest':y_val}) #These are the validation scores.
        classes=model.predict_classes(x_val,batch_size=batch)
        ### ------------- ###

        ### SAVING THE TESTING DATA ###
        #scores_test=model.predict(x_test,batch_size=batch)
        #sio.savemat(direc+name_test+'.mat',{'scores':scores_test,'ytest':y_test})
        ### ------------- ###
        # print model.evaluate(x_test,y_test,batch_size=batch)

        #predictions=model.predict(x_val,batch_size=batch)
        #print "Shape of predictions: ", predictions.shape

        data_saver(str(len(np.where(y_train[:,0]==1)[0])))
        data_saver(str(len(np.where(y_train[:,1]==1)[0])))
        print "Training 0 class: ",len(np.where(y_train[:,0]==1)[0])
        print "Training 1 class: ",len(np.where(y_train[:,1]==1)[0])
        return classes

#Non-function section

#y_test,predictions,classes=seq(x_train,y_train,x_val,y_val,x_test,y_test) #Calling the seq model, with 2 hidden layers
Project: leap-scd | Author: smittal6
def seq(x_train,y_train,x_val,y_val,x_test,y_test):
        #Defining the structure of the neural network
        #Creating a Network, with 2 Convolutional layers
        model=Sequential()
        model.add(Conv2D(64,(7,5),activation='relu',input_shape=(1,40,20)))
        model.add(Conv2D(128,(5,3),activation='relu',padding='same'))
        model.add(Conv2D(256,(3,3),activation='relu'))
        model.add(MaxPooling2D((5,2)))
        model.add(Flatten())
        model.add(Dense(256,activation='relu')) #Fully connected layer 1
        model.add(Dropout(0.5))
        model.add(Dense(256,activation='relu')) #Fully connected layer 2
        model.add(Dropout(0.5))
        model.add(Dense(2,activation='softmax')) #Output Layer
        model.summary()
        # f=open('/home/siddharthm/scd/scores/'+common_save+'-complete.txt','rb+')
        # print f >> model.summary()
        data_saver("##### ------ #####")
        data_saver(str(model.to_json()))
        # f.close()
        #Compilation region: Define optimizer, cost function, and the metric?
        sgd=SGD(lr=1)
        early_stopping=EarlyStopping(monitor='val_loss',patience=4)
        reduce_lr=ReduceLROnPlateau(monitor='val_loss',patience=4,factor=0.5)
        model.compile(optimizer=sgd,loss='categorical_crossentropy',metrics=['accuracy'])

        #Fitting region:Get to fit the model, with training data
        checkpointer=ModelCheckpoint(filepath=direc+common_save+'.json',monitor='val_acc',save_best_only=True,save_weights_only=True)

        #Doing the training[fitting]
        model.fit(x_train,y_train,epochs=EPOCH,batch_size=batch,validation_data=(x_val,y_val),callbacks=[checkpointer,early_stopping,reduce_lr])
        model.save_weights(direc+common_save+'-weights'+'.json') #Saving the weights from the model
        model.save(direc+common_save+'-model'+'.json')#Saving the model as is in its state

        ### SAVING THE VALIDATION DATA ###
        scores=model.predict(x_val,batch_size=batch)
        sio.savemat(direc+name_val+'.mat',{'scores':scores,'ytest':y_val}) #These are the validation scores.
        classes=model.predict_classes(x_val,batch_size=batch)
        ### ------------- ###

        ### SAVING THE TESTING DATA ###
        #scores_test=model.predict(x_test,batch_size=batch)
        #sio.savemat(direc+name_test+'.mat',{'scores':scores_test,'ytest':y_test})
        ### ------------- ###
        # print model.evaluate(x_test,y_test,batch_size=batch)

        #predictions=model.predict(x_val,batch_size=batch)
        #print "Shape of predictions: ", predictions.shape
        #print "Shape of y_test: ",y_test.shape
        return classes

#Non-function section

#y_test,predictions,classes=seq(x_train,y_train,x_val,y_val,x_test,y_test) #Calling the seq model, with 2 hidden layers