Python keras.layers.advanced_activations module: PReLU() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.advanced_activations.PReLU().
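Before the project snippets, here is a minimal, self-contained sketch (assuming Keras 2.x) of the two common ways to attach PReLU: as a free-standing layer after a Dense layer (one learnable slope per unit), and with shared_axes so a convolutional feature map learns one slope per channel.

from keras.models import Sequential
from keras.layers import Conv2D, Dense, Flatten
from keras.layers.advanced_activations import PReLU

model = Sequential()
model.add(Conv2D(16, (3, 3), padding='same', input_shape=(32, 32, 3)))
model.add(PReLU(shared_axes=[1, 2]))   # one slope per channel, shared over height and width
model.add(Flatten())
model.add(Dense(64))
model.add(PReLU())                     # one slope per unit
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy')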

Project: keras-mtcnn    Author: xiangrufan
def create_Kao_Onet(weight_path='model48.h5'):
    input = Input(shape = [48,48,3])
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1,2],name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1,2],name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1,2],name='prelu3')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(x)
    x = PReLU(shared_axes=[1,2],name='prelu4')(x)
    x = Permute((3,2,1))(x)
    x = Flatten()(x)
    x = Dense(256, name='conv5')(x)
    x = PReLU(name='prelu5')(x)

    classifier = Dense(2, activation='softmax',name='conv6-1')(x)
    bbox_regress = Dense(4,name='conv6-2')(x)
    landmark_regress = Dense(10,name='conv6-3')(x)
    model = Model([input], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)

    return model
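A hypothetical usage sketch for the function above (it assumes numpy is imported and the pretrained weights file model48.h5 is present): ONet consumes a batch of 48x48 RGB face crops and returns face scores, bounding-box offsets, and 10 landmark coordinates per crop.

import numpy as np

onet = create_Kao_Onet('model48.h5')
crops = np.random.rand(16, 48, 48, 3)                     # 16 normalized face crops
scores, bbox_offsets, landmarks = onet.predict(crops)
print(scores.shape, bbox_offsets.shape, landmarks.shape)  # (16, 2) (16, 4) (16, 10)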
Project: batchA3C    Author: ssamot
def build_network(num_actions, agent_history_length, resized_width, resized_height):
    state = tf.placeholder("float", [None, agent_history_length, resized_width, resized_height])

    inputs_v = Input(shape=(agent_history_length, resized_width, resized_height,))
    #model_v  = Permute((2, 3, 1))(inputs_v)

    model_v = Convolution2D(nb_filter=16, nb_row=8, nb_col=8, subsample=(4,4), activation='relu', border_mode='same')(inputs_v)
    model_v = Convolution2D(nb_filter=32, nb_row=4, nb_col=4, subsample=(2,2), activation='relu', border_mode='same')(model_v)
    model_v = Flatten()(model_v)
    model_v = Dense(output_dim=512)(model_v)
    model_v = PReLU()(model_v)


    action_probs = Dense(name="p", output_dim=num_actions, activation='softmax')(model_v)

    state_value = Dense(name="v", output_dim=1, activation='linear')(model_v)


    value_network = Model(input=inputs_v, output=[state_value, action_probs])


    return state, value_network
Project: keras-mtcnn    Author: xiangrufan
def create_Kao_Rnet(weight_path='model24.h5'):
    input = Input(shape=[24, 24, 3])  # change this shape to [None, None, 3] to enable arbitrary shape input
    x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3,strides=2, padding='same')(x)

    x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)

    x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    x = Dense(128, name='conv4')(x)
    x = PReLU( name='prelu4')(x)
    classifier = Dense(2, activation='softmax', name='conv5-1')(x)
    bbox_regress = Dense(4, name='conv5-2')(x)
    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
Project: enet-keras    Author: PavlosMelissinos
def build(inp, dropout_rate=0.01):
    enet = initial_block(inp)
    enet = BatchNormalization(momentum=0.1)(enet)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    enet = PReLU(shared_axes=[1, 2])(enet)
    enet = bottleneck(enet, 64, downsample=True, dropout_rate=dropout_rate)  # bottleneck 1.0
    for _ in range(4):
        enet = bottleneck(enet, 64, dropout_rate=dropout_rate)  # bottleneck 1.i

    enet = bottleneck(enet, 128, downsample=True)  # bottleneck 2.0
    # bottleneck 2.x and 3.x
    for _ in range(2):
        enet = bottleneck(enet, 128)  # bottleneck 2.1
        enet = bottleneck(enet, 128, dilated=2)  # bottleneck 2.2
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.3
        enet = bottleneck(enet, 128, dilated=4)  # bottleneck 2.4
        enet = bottleneck(enet, 128)  # bottleneck 2.5
        enet = bottleneck(enet, 128, dilated=8)  # bottleneck 2.6
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.7
        enet = bottleneck(enet, 128, dilated=16)  # bottleneck 2.8
    return enet
Project: enet-keras    Author: PavlosMelissinos
def build(inp, dropout_rate=0.01):
    pooling_indices = []
    enet, indices_single = initial_block(inp)
    enet = BatchNormalization(momentum=0.1)(enet)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    enet = PReLU(shared_axes=[1, 2])(enet)
    pooling_indices.append(indices_single)
    enet, indices_single = bottleneck(enet, 64, downsample=True, dropout_rate=dropout_rate)  # bottleneck 1.0
    pooling_indices.append(indices_single)
    for _ in range(4):
        enet = bottleneck(enet, 64, dropout_rate=dropout_rate)  # bottleneck 1.i

    enet, indices_single = bottleneck(enet, 128, downsample=True)  # bottleneck 2.0
    pooling_indices.append(indices_single)
    # bottleneck 2.x and 3.x
    for _ in range(2):
        enet = bottleneck(enet, 128)  # bottleneck 2.1
        enet = bottleneck(enet, 128, dilated=2)  # bottleneck 2.2
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.3
        enet = bottleneck(enet, 128, dilated=4)  # bottleneck 2.4
        enet = bottleneck(enet, 128)  # bottleneck 2.5
        enet = bottleneck(enet, 128, dilated=8)  # bottleneck 2.6
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.7
        enet = bottleneck(enet, 128, dilated=16)  # bottleneck 2.8
    return enet, pooling_indices
Project: copper_price_forecast    Author: liyinwei
def build_model():
    """
    Build the model.
    """
    model = Sequential()

    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
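Conf.LAYERS is defined elsewhere in the project; judging from how it is indexed above, a hypothetical configuration would be:

class Conf:
    # hypothetical values: [features per timestep, window length (also first
    # LSTM width), second LSTM width, output dimension]
    LAYERS = [1, 50, 100, 1]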
Project: copper_price_forecast    Author: liyinwei
def build_model():
    """
    Build the model.
    """
    model = Sequential()

    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: eva    Author: israelg99
def GatedPixelCNN(input_shape, filters, depth, latent=None, build=True):
    height, width, channels = input_shape
    palette = 256  # TODO: make the palette size configurable.

    input_img = Input(shape=input_shape, name=str(channels)+'_channels_'+str(palette)+'_palette')

    latent_vector = None
    if latent is not None:
        latent_vector = Input(shape=(latent,), name='latent_vector')

    model = GatedCNNs(filters, depth, latent_vector)(*GatedCNN(filters, latent_vector)(input_img))

    for _ in range(2):
        model = Convolution2D(filters, 1, 1, border_mode='valid')(model)
        model = PReLU()(model)

    outs = OutChannels(*input_shape, masked=False, palette=palette)(model)

    if build:
        model = Model(input=[input_img, latent_vector] if latent is not None else input_img, output=outs)
        model.compile(optimizer=Nadam(), loss='binary_crossentropy' if channels == 1 else 'sparse_categorical_crossentropy')

    return model
Project: eva    Author: israelg99
def PixelCNN(input_shape, filters, depth, build=True):
    height, width, channels = input_shape
    palette = 256  # TODO: make the palette size configurable.

    input_img = Input(shape=input_shape, name=str(channels)+'_channels_'+str(palette)+'_palette')

    model = MaskedConvolution2D(filters, 7, 7, mask='A', border_mode='same', name='masked2d_A')(input_img)

    model = ResidualBlockList(filters, depth)(model)
    model = PReLU()(model)

    for _ in range(2):
        model = MaskedConvolution2D(filters, 1, 1, border_mode='valid')(model)
        model = PReLU()(model)

    outs = OutChannels(*input_shape, masked=True, palette=palette)(model)

    if build:
        model = Model(input=input_img, output=outs)
        model.compile(optimizer=Nadam(), loss='binary_crossentropy' if channels == 1 else 'sparse_categorical_crossentropy')

    return model
Project: RIDDLE    Author: jisungk
def create_base_model(nb_features, nb_classes, learning_rate=0.02):
    model = Sequential() 

    # input layer + first hidden layer 
    model.add(Dense(512, kernel_initializer='lecun_uniform', input_shape=(nb_features,)))
    model.add(PReLU()) 
    model.add(Dropout(0.5)) 

    # additional hidden layer
    model.add(Dense(512, kernel_initializer='lecun_uniform')) 
    model.add(PReLU()) 
    model.add(Dropout(0.75)) 

    # output layer 
    model.add(Dense(nb_classes, kernel_initializer='lecun_uniform')) 
    model.add(Activation('softmax')) 

    model.compile(loss='categorical_crossentropy', 
        optimizer=Adam(lr=learning_rate), metrics=['accuracy'])  

    return model
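A hypothetical call, e.g. for 1,000 input features and 4 target classes:

model = create_base_model(nb_features=1000, nb_classes=4)
model.summary()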
Project: kaggle-allstate-claims-severity    Author: alno
def nn_mlp(input_shape, params):
    model = Sequential()

    for i, layer_size in enumerate(params['layers']):
        reg = regularizer(params)

        if i == 0:
            model.add(Dense(layer_size, init='he_normal', W_regularizer=reg, input_shape=input_shape))
        else:
            model.add(Dense(layer_size, init='he_normal', W_regularizer=reg))

        if params.get('batch_norm', False):
            model.add(BatchNormalization())

        if 'dropouts' in params:
            model.add(Dropout(params['dropouts'][i]))

        model.add(PReLU())

    model.add(Dense(1, init='he_normal'))

    return model
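The params dict and the regularizer() helper are defined elsewhere in the project; a hypothetical params value matching the keys read above:

# Hypothetical hyperparameters; regularizer(params) is a project helper that
# builds a Keras weight regularizer from entries such as 'l2'.
params = {
    'layers': [400, 200, 50],
    'dropouts': [0.4, 0.2, 0.2],
    'batch_norm': True,
    'l2': 1e-5,
}
model = nn_mlp(input_shape=(130,), params=params)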
Project: Exoplanet-Artificial-Intelligence    Author: pearsonkyle
def make_wave(maxlen):
    model = Sequential()
    # dense block 1
    model.add(Dense(64, input_dim=maxlen, kernel_initializer='he_normal', bias_initializer='zeros'))
    model.add(PReLU())
    model.add(Dropout(0.25))

    model.add(Dense(32))
    model.add(PReLU())

    model.add(Dense(8))
    model.add(PReLU())

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    SGDsolver = SGD(lr=0.1, momentum=0.25, decay=0.0001, nesterov=True)
    model.compile(loss='binary_crossentropy',
                optimizer=SGDsolver,
                metrics=['accuracy'])
    return model
Project: kaggler-template    Author: jeongyoonlee
def nn_model(dims):
    model = Sequential()

    model.add(Dense(400, input_dim=dims, init='he_normal'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(200, init='he_normal'))
    model.add(PReLU())
    model.add(BatchNormalization())    
    model.add(Dropout(0.2))

    model.add(Dense(50, init='he_normal'))
    model.add(PReLU())
    model.add(BatchNormalization())    
    model.add(Dropout(0.2))

    model.add(Dense(1, init='he_normal'))
    model.compile(loss = 'mae', optimizer = 'adadelta')
    return(model)
Project: Kaggle_Allstate    Author: sadz2201
def nn_model():
    model = Sequential()

    model.add(Dense(400, input_dim = xtrain.shape[1], init = 'he_normal')) #400
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(120, init = 'he_normal')) #200
    model.add(PReLU())
    model.add(BatchNormalization())    
    model.add(Dropout(0.2))

    model.add(Dense(30, init = 'he_normal')) #50
    model.add(PReLU())
    model.add(BatchNormalization())    
    model.add(Dropout(0.1))                 #0.2

    model.add(Dense(1, init = 'he_normal'))
    model.compile(loss = 'mae', optimizer = 'adadelta')
    return(model)
Project: Kaggle_Allstate    Author: sadz2201
def nn_model():
    model = Sequential()

    model.add(Dense(425, input_dim = xtrain.shape[1], init = 'he_normal')) #425
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.4)) #0.4

    model.add(Dense(200, init = 'he_normal')) #225
    model.add(PReLU())
    model.add(BatchNormalization())    
    model.add(Dropout(0.3))                 #0.3

    model.add(Dense(40, init = 'he_normal')) #60
    model.add(PReLU())
    model.add(BatchNormalization())    
    model.add(Dropout(0.15))                 #0.1

    model.add(Dense(1, init = 'he_normal'))
    model.compile(loss = 'mae', optimizer = 'adam')
    return(model)
Project: Kaggle_Allstate    Author: sadz2201
def nn_model():
    model = Sequential()

    model.add(Dense(450, input_dim = xtrain.shape[1], init = 'he_normal')) #400
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(225, init = 'he_normal')) #220
    model.add(PReLU())
    model.add(BatchNormalization())    
    model.add(Dropout(0.25))                 #0.2

    model.add(Dense(60, init = 'he_normal')) #50
    model.add(PReLU())
    model.add(BatchNormalization())    
    model.add(Dropout(0.15))                 #0.1

    model.add(Dense(1, init = 'he_normal'))
    model.compile(loss = 'mae', optimizer = 'eve')  # 'eve' is not a stock Keras optimizer; the project presumably registers a custom Eve implementation
    return(model)
Project: crime_prediction    Author: livenb
def create_net():
    model = Sequential()

    model.add(Dense(400, input_dim = X_train.shape[1], init = 'he_normal'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(200, init = 'he_normal'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Dense(50, init = 'he_normal'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Dense(output_dim=10, init = 'he_normal'))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['categorical_accuracy'])
    return(model)
Project: kaggle-quora-solution-8th    Author: qqgeogor
def MLP(opt='nadam'):

    X_raw=Input(shape=(LEN_RAW_INPUT,),name='input_raw')

    fc1=BatchNormalization()(X_raw)
    fc1=Dense(512)(fc1)
    fc1=PReLU()(fc1)
    fc1=Dropout(0.25)(fc1)

    fc1=BatchNormalization()(fc1)
    fc1=Dense(256)(fc1)
    fc1=PReLU()(fc1)
    fc1=Dropout(0.15)(fc1)

    fc1=BatchNormalization()(fc1)
    auxiliary_output_dense = Dense(1, activation='sigmoid', name='aux_output_dense')(fc1)


    output_all = Dense(1,activation='sigmoid',name='output')(fc1)
    model=Model(input=X_raw,output=output_all)
    model.compile(
                optimizer=opt,
                loss = 'binary_crossentropy')
    return model
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=310, init='he_normal'))
            model.add(LeakyReLU(alpha=.001))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=310,output_dim=252, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.5))
            model.add(Dense(input_dim=252,output_dim=128, init='he_normal'))
            model.add(LeakyReLU(alpha=.001))
            model.add(BatchNormalization())
            model.add(Dropout(0.4))
            model.add(Dense(input_dim=128,output_dim=2, init='he_normal', activation='softmax'))
            #model.add(Activation('softmax'))
            sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
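This and the following stacking build_model variants all read the module-level global nn_input_dim_NN and wrap the compiled network in the project's KerasClassifier helper; a hypothetical context:

nn_input_dim_NN = 93  # hypothetical: number of input features after preprocessing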
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=62, init='he_normal'))
            model.add(LeakyReLU(alpha=.001))
            model.add(Dropout(0.3))
            model.add(Dense(input_dim=62,output_dim=158, init='he_normal'))
            model.add(LeakyReLU(alpha=.001))
            model.add(Dropout(0.25))
            model.add(Dense(input_dim=158,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))
            #model.add(Activation('softmax'))
            sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=100,output_dim=380, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=380,output_dim=50, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=105, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=105,output_dim=280, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=280,output_dim=60, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=60,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.01, decay=1e-10, momentum=0.99, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=100,output_dim=180, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=180,output_dim=50, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.5))
            model.add(Dense(input_dim=50,output_dim=30, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=30,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=100,output_dim=360, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=360,output_dim=50, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=110,output_dim=350, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=350,output_dim=50, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.3))
            model.add(Dense(input_dim=110,output_dim=300, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.5))
            model.add(Dense(input_dim=300,output_dim=60, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=60,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(Dropout(0.1))
            model.add(Dense(input_dim=100,output_dim=300, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=300,output_dim=50, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=105, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=105,output_dim=200, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=200,output_dim=60, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.5))
            model.add(Dense(input_dim=60,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.1))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.01, decay=1e-10, momentum=0.99, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=140, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=140,output_dim=380, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=380,output_dim=50, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=100, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=100,output_dim=360, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=360,output_dim=50, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=50,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.1))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.007, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=110,output_dim=350, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=350,output_dim=150, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=150,output_dim=20, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(input_dim=20,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.02, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: stacking    Author: ikki407
def build_model(self):
            model = Sequential()
            model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
            model.add(Dense(input_dim=nn_input_dim_NN, output_dim=110, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.3))
            model.add(Dense(input_dim=110,output_dim=200, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.5))
            model.add(Dense(input_dim=200,output_dim=60, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.6))
            model.add(Dense(input_dim=60,output_dim=80, init='he_normal'))
            model.add(PReLU(init='zero'))
            model.add(BatchNormalization())
            model.add(Dropout(0.3))
            model.add(Dense(input_dim=80,output_dim=2, init='he_normal', activation='softmax'))    
            sgd = SGD(lr=0.01, decay=1e-10, momentum=0.9, nesterov=True)

            model.compile(optimizer=sgd, loss='binary_crossentropy',class_mode='binary')

            return KerasClassifier(nn=model,**self.params)
Project: chess-deep-rl    Author: rajpurkar
def conv_wrap(params, conv_out, i):
    from keras.layers.normalization import BatchNormalization
    from keras.layers.advanced_activations import PReLU
    from keras.layers.convolutional import Convolution2D
    from keras.layers import Dropout

    # use filter_width_K if it is there, otherwise use 3
    filter_key = "filter_width_%d" % i
    filter_width = params.get(filter_key, 3)
    num_filters = params["num_filters"]
    conv_out = Convolution2D(
        nb_filter=num_filters,
        nb_row=filter_width,
        nb_col=filter_width,
        init='he_normal',
        border_mode='same')(conv_out)
    conv_out = BatchNormalization()(conv_out)
    conv_out = PReLU()(conv_out)
    if params["dropout"] > 0:
        conv_out = Dropout(params["dropout"])(conv_out)
    return conv_out
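A hypothetical params dict for conv_wrap; num_filters and dropout are required by the direct lookups above, while filter_width_i falls back to 3:

# Hypothetical hyperparameters for layer i = 1; conv_out is any 4D Keras tensor.
params = {
    'num_filters': 64,
    'dropout': 0.2,        # 0 disables the Dropout layer
    'filter_width_1': 5,   # optional; layers without their key default to 3
}
conv_out = conv_wrap(params, conv_out, 1)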
Project: visual_turing_test-tutorial    Author: mateuszmalinowski
def deep_mlp(self):
        """
        Deep multilayer perceptron.
        """
        if self._config.num_mlp_layers == 0:
            self.add(Dropout(0.5))
        else:
            for j in xrange(self._config.num_mlp_layers):
                self.add(Dense(self._config.mlp_hidden_dim))
                if self._config.mlp_activation == 'elu':
                    self.add(ELU())
                elif self._config.mlp_activation == 'leaky_relu':
                    self.add(LeakyReLU())
                elif self._config.mlp_activation == 'prelu':
                    self.add(PReLU())
                else:
                    self.add(Activation(self._config.mlp_activation))
                self.add(Dropout(0.5))
Project: keras-mtcnn    Author: xiangrufan
def create_Kao_Pnet(weight_path='model12old.h5'):
    input = Input(shape=[None, None, 3])
    x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1,2],name='PReLU1')(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1,2],name='PReLU2')(x)
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1,2],name='PReLU3')(x)
    classifier = Conv2D(2, (1, 1), activation='softmax', name='conv4-1')(x)
    bbox_regress = Conv2D(4, (1, 1), name='conv4-2')(x)
    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
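Because PNet is fully convolutional (its Input shape is [None, None, 3]), it can scan whole images of arbitrary size; a hypothetical sketch, assuming numpy is imported and model12old.h5 is available:

import numpy as np

pnet = create_Kao_Pnet('model12old.h5')
image = np.random.rand(1, 240, 320, 3)    # one arbitrary-size image
cls_map, bbox_map = pnet.predict(image)   # per-position face scores and box offsets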
Project: kaggle_yt8m    Author: N01Z3
def fc_inception(input_tensor, n=3000, d=0.5):

    br1 = Dense(n)(input_tensor)
    br1 = LeakyReLU()(br1)
    br1 = BatchNormalization()(br1)
    br1 = Dropout(d)(br1)
    br1 = Dense(int(n/3.0))(br1)

    br2 = Dense(n)(input_tensor)
    br2 = BatchNormalization()(br2)
    br2 = ELU()(br2)
    br2 = Dropout(d)(br2)
    br2 = Dense(int(n/3.0))(br2)

    br3 = Dense(int(n/3.0))(input_tensor)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)
    br3 = Dense(int(n/3.0))(br3)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)
    br3 = Dense(int(n/3.0))(br3)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)

    x = merge([br1, br2, br3], mode='concat', concat_axis=1)
    return x
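A hypothetical call (Keras 1 functional API, matching the merge() usage above); each branch emits int(n/3) units, so the concatenated output is 3000-dimensional for n=3000:

inp = Input(shape=(4096,))            # hypothetical input feature size
x = fc_inception(inp, n=3000, d=0.5)  # three 1000-dim branches, concatenated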
Project: auto_ml    Author: doordash
def make_deep_learning_model(hidden_layers=None, num_cols=None, optimizer='adam', dropout_rate=0.2, weight_constraint=0, feature_learning=False):

    if feature_learning == True and hidden_layers is None:
        hidden_layers = [1, 1, 0.5]

    if hidden_layers is None:
        hidden_layers = [1, 1, 1]

    # The hidden_layers argument only describes a shape: it holds multipliers such as 0.5, 1, and 2 that are scaled by num_cols below.
    scaled_layers = []
    for layer in hidden_layers:
        scaled_layers.append(int(num_cols * layer))

    # If we're training this model for feature_learning, our penultimate layer (our final hidden layer before the "output" layer) will always have 10 neurons, meaning that we always output 10 features from our feature_learning model
    if feature_learning == True:
        scaled_layers.append(10)

    model = Sequential()

    model.add(Dense(scaled_layers[0], input_dim=num_cols, kernel_initializer='normal', kernel_regularizer=regularizers.l2(0.01)))  # first hidden layer uses the scaled width, not the raw multiplier
    model.add(PReLU())

    for layer_size in scaled_layers[1:-1]:
        model.add(Dense(layer_size, kernel_initializer='normal', kernel_regularizer=regularizers.l2(0.01)))
        model.add(PReLU())

    # There are times we will want the output from our penultimate layer, not the final layer, so give it a name that makes the penultimate layer easy to find
    model.add(Dense(scaled_layers[-1], kernel_initializer='normal', name='penultimate_layer', kernel_regularizer=regularizers.l2(0.01)))
    model.add(PReLU())

    # For regressors, we want an output layer with a single node
    model.add(Dense(1, kernel_initializer='normal'))

    # The final step is to compile the model
    model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mean_absolute_error', 'mean_absolute_percentage_error'])

    return model
Project: auto_ml    Author: doordash
def make_deep_learning_classifier(hidden_layers=None, num_cols=None, optimizer='adam', dropout_rate=0.2, weight_constraint=0, final_activation='sigmoid', feature_learning=False):

    if feature_learning == True and hidden_layers is None:
        hidden_layers = [1, 1, 0.5]

    if hidden_layers is None:
        hidden_layers = [1, 1, 1]

    # The hidden_layers argument only describes a shape: it holds multipliers such as 0.5, 1, and 2 that are scaled by num_cols below.
    scaled_layers = []
    for layer in hidden_layers:
        scaled_layers.append(int(num_cols * layer))

    # If we're training this model for feature_learning, our penultimate layer (our final hidden layer before the "output" layer) will always have 10 neurons, meaning that we always output 10 features from our feature_learning model
    if feature_learning == True:
        scaled_layers.append(10)


    model = Sequential()

    # There are times we will want the output from our penultimate layer, not the final layer, so give it a name that makes the penultimate layer easy to find
    model.add(Dense(scaled_layers[0], input_dim=num_cols, kernel_initializer='normal', kernel_regularizer=regularizers.l2(0.01)))  # first hidden layer uses the scaled width, not the raw multiplier
    model.add(PReLU())

    for layer_size in scaled_layers[1:-1]:
        model.add(Dense(layer_size, kernel_initializer='normal', kernel_regularizer=regularizers.l2(0.01)))
        model.add(PReLU())

    model.add(Dense(scaled_layers[-1], kernel_initializer='normal', name='penultimate_layer', kernel_regularizer=regularizers.l2(0.01)))
    model.add(PReLU())

    model.add(Dense(1, kernel_initializer='normal', activation=final_activation))
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy', 'poisson'])
    return model
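A hypothetical call for a 100-column dataset; since hidden_layers entries are multipliers on num_cols, [1, 1, 1] yields three 100-unit hidden layers:

clf = make_deep_learning_classifier(hidden_layers=[1, 1, 1], num_cols=100)
clf.summary()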
Project: enet-keras    Author: PavlosMelissinos
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4

    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)

    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)

    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = MaxUnpooling2D()([other, reverse_module])

    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        # decoder = Activation('relu')(decoder)
        decoder = PReLU(shared_axes=[1, 2])(decoder)

    return decoder
Project: keras    Author: GeekLiB
def test_prelu():
    from keras.layers.advanced_activations import PReLU
    layer_test(PReLU, kwargs={},
               input_shape=(2, 3, 4))
Project: eva    Author: israelg99
def Wavenet(input_shape, filters, depth, stacks, last=0, h=None, build=True):
    # TODO: Soft targets? A float to make targets a gaussian with stdev.
    # TODO: Train only receptive field. The temporal-first outputs are computed from zero-padding.
    # TODO: Global conditioning?
    # TODO: Local conditioning?

    _, nb_bins = input_shape

    input_audio = Input(input_shape, name='audio_input')

    model = CausalAtrousConvolution1D(filters, 2, mask_type='A', atrous_rate=1, border_mode='valid')(input_audio)

    out, skip_connections = WavenetBlocks(filters, depth, stacks)(model)

    out = Merge(mode='sum', name='merging_skips')(skip_connections)
    out = PReLU()(out)

    out = Convolution1D(nb_bins, 1, border_mode='same')(out)
    out = PReLU()(out)

    out = Convolution1D(nb_bins, 1, border_mode='same')(out)

    # https://storage.googleapis.com/deepmind-live-cms/documents/BlogPost-Fig2-Anim-160908-r01.gif
    if last > 0:
        out = Lambda(lambda x: x[:, -last:], output_shape=(last, out._keras_shape[2]), name='last_out')(out)

    out = Activation('softmax')(out)

    if build:
        model = Model(input_audio, out)
        model.compile(Nadam(), 'sparse_categorical_crossentropy')

    return model
Project: eva    Author: israelg99
def __call__(self, model):
        # 2h -> h
        block = PReLU()(model)
        block = MaskedConvolution2D(self.filters//2, 1, 1)(block)

        # h 3x3 -> h
        block = PReLU()(block)
        block = MaskedConvolution2D(self.filters//2, 3, 3, border_mode='same')(block)

        # h -> 2h
        block = PReLU()(block)
        block = MaskedConvolution2D(self.filters, 1, 1)(block)

        return Merge(mode='sum')([model, block])
Project: Fabrik    Author: Cloud-CV
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['PReLU']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'PReLU')
Project: Fabrik    Author: Cloud-CV
def activation(layer, layer_in, layerId):
    out = {}
    if (layer['info']['type'] == 'ReLU'):
        if (layer['params']['negative_slope'] != 0):
            out[layerId] = LeakyReLU(alpha=layer['params']['negative_slope'])(*layer_in)
        else:
            out[layerId] = Activation('relu')(*layer_in)
    elif (layer['info']['type'] == 'PReLU'):
        out[layerId] = PReLU()(*layer_in)
    elif (layer['info']['type'] == 'ELU'):
        out[layerId] = ELU(alpha=layer['params']['alpha'])(*layer_in)
    elif (layer['info']['type'] == 'ThresholdedReLU'):
        out[layerId] = ThresholdedReLU(theta=layer['params']['theta'])(*layer_in)
    elif (layer['info']['type'] == 'Sigmoid'):
        out[layerId] = Activation('sigmoid')(*layer_in)
    elif (layer['info']['type'] == 'TanH'):
        out[layerId] = Activation('tanh')(*layer_in)
    elif (layer['info']['type'] == 'Softmax'):
        out[layerId] = Activation('softmax')(*layer_in)
    elif (layer['info']['type'] == 'SELU'):
        out[layerId] = Activation('selu')(*layer_in)
    elif (layer['info']['type'] == 'Softplus'):
        out[layerId] = Activation('softplus')(*layer_in)
    elif (layer['info']['type'] == 'Softsign'):
        out[layerId] = Activation('softsign')(*layer_in)
    elif (layer['info']['type'] == 'HardSigmoid'):
        out[layerId] = Activation('hard_sigmoid')(*layer_in)
    return out
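A hypothetical layer spec in the dict shape the dispatcher expects (mirroring the net['l1'] structure used in the export test above):

layer = {'info': {'type': 'PReLU'}, 'params': {}}
inp = Input(shape=(64,))
out = activation(layer, [inp], 'l1')['l1']   # a PReLU tensor wired onto inp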
Project: NetworkCompress    Author: luzai
def conv_pooling_layer(self, name, kernel_size, filters, kernel_regularizer_l2):
        def f(input):
            layer = Conv2D(kernel_size=kernel_size, filters=filters, name=name, padding='same',
                           kernel_regularizer=regularizers.l2(kernel_regularizer_l2))(input)
            layer = PReLU()(layer)
            layer = keras.layers.MaxPooling2D(name=name + '_maxpooling')(layer)
            return layer

        return f
Project: NetworkCompress    Author: luzai
def group_layer(self, group_num, filters, name, kernel_regularizer_l2):
        def f(input):
            if group_num == 1:
                tower = Conv2D(filters, (1, 1), name=name + '_conv2d_0_1', padding='same',
                               kernel_initializer=IdentityConv())(input)
                tower = Conv2D(filters, (3, 3), name=name + '_conv2d_0_2', padding='same',
                               kernel_initializer=IdentityConv(),
                               kernel_regularizer=regularizers.l2(kernel_regularizer_l2))(tower)
                tower = PReLU()(tower)
                return tower
            else:
                group_output = []
                for i in range(group_num):
                    filter_num = filters // group_num
                    # e.g. if filters == 201 and group_num == 4, the last group gets 51 filters
                    if i == group_num - 1:  # last group
                        filter_num = filters - i * (filters // group_num)

                    tower = Conv2D(filter_num, (1, 1), name=name + '_conv2d_' + str(i) + '_1', padding='same',
                                   kernel_initializer=GroupIdentityConv(i, group_num))(input)
                    tower = Conv2D(filter_num, (3, 3), name=name + '_conv2d_' + str(i) + '_2', padding='same',
                                   kernel_initializer=IdentityConv(),
                                   kernel_regularizer=regularizers.l2(kernel_regularizer_l2))(tower)
                    tower = PReLU()(tower)
                    group_output.append(tower)

                if K.image_data_format() == 'channels_first':
                    axis = 1
                elif K.image_data_format() == 'channels_last':
                    axis = 3
                output = Concatenate(axis=axis)(group_output)

                return output

        return f
Project: NetworkCompress    Author: luzai
def make_init_model(self):
        models = []

        input_data = Input(shape=self.gl_config.input_shape)
        import random
        init_model_index = random.randint(1, 4)
        init_model_index = 1  # pinned to 1; the random choice above is effectively disabled
        if init_model_index == 1:  # one conv layer with kernel num = 64
            stem_conv_1 = Conv2D(128, 3, padding='same', name='conv2d1' )(input_data)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 2:  # two conv layers with kernel num = 64
            stem_conv_0 = Conv2D(128, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_0 = PReLU()(stem_conv_0)
            stem_conv_1 = Conv2D(128, 3, padding='same', name='conv2d2')(stem_conv_0)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 3:  # one conv layer with a wider kernel num = 128
            stem_conv_1 = Conv2D(256, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 4:  # two conv layers with a wider kernel_num = 128
            stem_conv_0 = Conv2D(256, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_0 = PReLU()(stem_conv_0)
            stem_conv_1 = Conv2D(256, 3, padding='same', name='conv2d2')(stem_conv_0)
            stem_conv_1 = PReLU()(stem_conv_1)
        import keras
        stem_conv_1 = keras.layers.MaxPooling2D(name='maxpooling2d1')(stem_conv_1)
        stem_conv_1 = Conv2D(self.gl_config.nb_class, 3, padding='same', name='conv2d3')(stem_conv_1)
        stem_global_pooling_1 = GlobalMaxPooling2D(name='globalmaxpooling2d1')(stem_conv_1)
        stem_softmax_1 = Activation('softmax', name='activation1')(stem_global_pooling_1)

        model = Model(inputs=input_data, outputs=stem_softmax_1)

        return model
Project: coremltools    Author: apple
def test_tiny_conv_prelu_random(self,
                                    model_precision=_MLMODEL_FULL_PRECISION):
        np.random.seed(1988)

        # Define a model
        from keras.layers.advanced_activations import PReLU
        model = Sequential()
        model.add(Conv2D(input_shape = (10, 10, 3),
            filters = 3, kernel_size = (5,5), padding = 'same'))
        model.add(PReLU(shared_axes=[1, 2]))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_keras_model(model, model_precision=model_precision)
Project: coremltools    Author: apple
def test_tiny_conv_prelu_random(self):
        np.random.seed(1988)

        # Define a model
        from keras.layers.advanced_activations import PReLU
        model = Sequential()
        model.add(Convolution2D(input_shape = (10, 10, 3),
            nb_filter = 3, nb_row = 5, nb_col = 5, border_mode = 'same'))
        model.add(PReLU(shared_axes=[1, 2]))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_keras_model(model)
Project: auto_ml    Author: ClimbsRocks
def get_activation_layer(activation):
    if activation == 'LeakyReLU':
        return LeakyReLU()
    if activation == 'PReLU':
        return PReLU()
    if activation == 'ELU':
        return ELU()
    if activation == 'ThresholdedReLU':
        return ThresholdedReLU()

    return Activation(activation)

# TODO: same for optimizers, including clipnorm
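A hypothetical usage sketch: since the helper returns a layer instance, it can be added directly to a model, and unknown names fall through to Activation():

model = Sequential()
model.add(Dense(64, input_dim=20))
model.add(get_activation_layer('PReLU'))      # advanced activation instance
model.add(Dense(1))
model.add(get_activation_layer('sigmoid'))    # falls through to Activation('sigmoid')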