Python keras.optimizers module: Nadam() code examples

We extracted the following code examples from open-source Python projects to illustrate how to use keras.optimizers.Nadam().
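To set the stage, here is a minimal, self-contained sketch of the pattern most of these examples follow. The model, layer sizes, and data shape are hypothetical placeholders, not taken from any project below; the snippet targets the Keras 1/2-era API (lr, clipnorm) used throughout this page.

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Nadam

# Hypothetical two-class classifier; only the compile step matters here.
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dense(2, activation='softmax'))

# Nadam is Adam with Nesterov momentum. Keras defaults to lr=0.002;
# lr and clipnorm are the knobs the projects below most often tune.
model.compile(optimizer=Nadam(lr=0.002, clipnorm=1.),
              loss='categorical_crossentropy',
              metrics=['accuracy'])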

Project: keras-fractalnet    Author: snf    | Project source | File source
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = RMSprop(lr=LEARN_START)
    #optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png')
    return model
Project: keras-fractalnet    Author: snf    | Project source | File source
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32) if K._BACKEND == 'theano' else (32, 32, 3))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM, nesterov=True)
    optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png', show_shapes=True)
    return model
Project: eva    Author: israelg99    | Project source | File source
def GatedPixelCNN(input_shape, filters, depth, latent=None, build=True):
    height, width, channels = input_shape
    palette = 256  # TODO: make the palette size configurable.

    input_img = Input(shape=input_shape, name=str(channels)+'_channels_'+str(palette)+'_palette')

    latent_vector = None
    if latent is not None:
        latent_vector = Input(shape=(latent,), name='latent_vector')

    model = GatedCNNs(filters, depth, latent_vector)(*GatedCNN(filters, latent_vector)(input_img))

    for _ in range(2):
        model = Convolution2D(filters, 1, 1, border_mode='valid')(model)
        model = PReLU()(model)

    outs = OutChannels(*input_shape, masked=False, palette=palette)(model)

    if build:
        model = Model(input=[input_img, latent_vector] if latent is not None else input_img, output=outs)
        model.compile(optimizer=Nadam(), loss='binary_crossentropy' if channels == 1 else 'sparse_categorical_crossentropy')

    return model
Project: eva    Author: israelg99    | Project source | File source
def PixelCNN(input_shape, filters, depth, build=True):
    height, width, channels = input_shape
    palette = 256  # TODO: make the palette size configurable.

    input_img = Input(shape=input_shape, name=str(channels)+'_channels_'+str(palette)+'_palette')

    model = MaskedConvolution2D(filters, 7, 7, mask='A', border_mode='same', name='masked2d_A')(input_img)

    model = ResidualBlockList(filters, depth)(model)
    model = PReLU()(model)

    for _ in range(2):
        model = MaskedConvolution2D(filters, 1, 1, border_mode='valid')(model)
        model = PReLU()(model)

    outs = OutChannels(*input_shape, masked=True, palette=palette)(model)

    if build:
        model = Model(input=input_img, output=outs)
        model.compile(optimizer=Nadam(), loss='binary_crossentropy' if channels == 1 else 'sparse_categorical_crossentropy')

    return model
Project: auto_ml    Author: ClimbsRocks    | Project source | File source
def get_optimizer(name='Adadelta'):
    if name == 'SGD':
        return optimizers.SGD(clipnorm=1.)
    if name == 'RMSprop':
        return optimizers.RMSprop(clipnorm=1.)
    if name == 'Adagrad':
        return optimizers.Adagrad(clipnorm=1.)
    if name == 'Adadelta':
        return optimizers.Adadelta(clipnorm=1.)
    if name == 'Adam':
        return optimizers.Adam(clipnorm=1.)
    if name == 'Adamax':
        return optimizers.Adamax(clipnorm=1.)
    if name == 'Nadam':
        return optimizers.Nadam(clipnorm=1.)

    return optimizers.Adam(clipnorm=1.)
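For context, a hypothetical call site (not from the project) showing how the factory above is typically used; any unrecognized name falls back to Adam:

optimizer = get_optimizer('Nadam')  # returns optimizers.Nadam(clipnorm=1.)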
Project: ABiViRNet    Author: lvapeab    | Project source | File source
def setOptimizer(self, **kwargs):
        """
        Sets a new optimizer for the Translation_Model.
        :param **kwargs:
        """

        # compile differently depending on whether our model is 'Sequential' or 'Graph'
        if self.verbose > 0:
            logging.info("Preparing optimizer and compiling.")
        if self.params['OPTIMIZER'].lower() == 'adam':
            optimizer = Adam(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'rmsprop':
            optimizer = RMSprop(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'nadam':
            optimizer = Nadam(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'adadelta':
            optimizer = Adadelta(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'sgd':
            optimizer = SGD(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        else:
            logging.info('\tWARNING: The modification of the LR is not implemented for the chosen optimizer.')
            optimizer = eval(self.params['OPTIMIZER'])
        self.model.compile(optimizer=optimizer, loss=self.params['LOSS'],
                           sample_weight_mode='temporal' if self.params['SAMPLE_WEIGHTS'] else None)
Project: Deep-Learning-para-diagnostico-a-partir-de-imagenes-Biomedicas    Author: pacocp    | Project source | File source
def create_model_RES():

    inp = Input((110, 110, 3))
    cnv1 = Conv2D(64, 3, 3, subsample=[2,2], activation='relu', border_mode='same')(inp)
    r1 = Residual(64, 64, cnv1)
    # An example residual unit coming after a convolutional layer. NOTE: the residual above takes the 64 output
    # channels of the Conv2D layer as the first argument to the Residual function.
    r2 = Residual(64, 64, r1)
    cnv2 = Conv2D(64, 3, 3, activation='relu', border_mode='same')(r2)
    r3 = Residual(64, 64, cnv2)
    r4 = Residual(64, 64, r3)
    cnv3 = Conv2D(128, 3, 3, activation='relu', border_mode='same')(r4)
    r5 = Residual(128, 128, cnv3)
    r6 = Residual(128, 128, r5)
    maxpool = MaxPooling2D(pool_size=(7, 7))(r6)
    flatten = Flatten()(maxpool)
    dense1 = Dense(128, activation='relu')(flatten)
    out = Dense(2, activation='softmax')(dense1)

    model = Model(input=inp, output=out)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Nadam(lr=1e-4), metrics=['accuracy'])

    return model
Project: keras    Author: GeekLiB    | Project source | File source
def test_nadam():
    _test_optimizer(Nadam())
Project: eva    Author: israelg99    | Project source | File source
def Wavenet(input_shape, filters, depth, stacks, last=0, h=None, build=True):
    # TODO: Soft targets? A float to make targets a gaussian with stdev.
    # TODO: Train only receptive field. The temporal-first outputs are computed from zero-padding.
    # TODO: Global conditioning?
    # TODO: Local conditioning?

    _, nb_bins = input_shape

    input_audio = Input(input_shape, name='audio_input')

    model = CausalAtrousConvolution1D(filters, 2, mask_type='A', atrous_rate=1, border_mode='valid')(input_audio)

    out, skip_connections = WavenetBlocks(filters, depth, stacks)(model)

    out = Merge(mode='sum', name='merging_skips')(skip_connections)
    out = PReLU()(out)

    out = Convolution1D(nb_bins, 1, border_mode='same')(out)
    out = PReLU()(out)

    out = Convolution1D(nb_bins, 1, border_mode='same')(out)

    # https://storage.googleapis.com/deepmind-live-cms/documents/BlogPost-Fig2-Anim-160908-r01.gif
    if last > 0:
        out = Lambda(lambda x: x[:, -last:], output_shape=(last, out._keras_shape[2]), name='last_out')(out)

    out = Activation('softmax')(out)

    if build:
        model = Model(input_audio, out)
        model.compile(Nadam(), 'sparse_categorical_crossentropy')

    return model
Project: main    Author: rmkemker    | Project source | File source
def __init__(self, hidden_layer_shape=[64], weight_decay=1e-4,
                 batch_normalization=True, activation='relu', save_fname=None,
                 patience=6, lr=2e-3, min_lr=2e-6, verbose=2, mu=None,
                 refit=False, gpu_list=None, optimizer=None, nb_epochs=1000,
                 kernel_initializer='glorot_normal', lr_patience=3):

        self.model = Sequential()
        self.hidden = hidden_layer_shape
        self.wd = weight_decay
        self.bn = batch_normalization
        self.activation = activation
        self.fname = save_fname
        self.patience = patience
        self.lr = lr
        self.min_lr = min_lr
        self.verbose = verbose
        self.mu = mu
        self.epochs = nb_epochs
        self.refit = refit
        self.gpus = gpu_list
        self.ki = kernel_initializer
        self.lr_patience = lr_patience

        if optimizer is None:
            self.opt = Nadam(self.lr)
        else:
            # keep whatever optimizer the caller supplied
            self.opt = optimizer

        if self.refit:
            raise NotImplementedError('I have not implemented the refit functionality yet.')
Project: keras-customized    Author: ambrite    | Project source | File source
def test_nadam():
    _test_optimizer(Nadam())
Project: neural-decoder    Author: Krastanov    | Project source | File source
def create_model(L, hidden_sizes=[4], hidden_act='tanh', act='sigmoid', loss='binary_crossentropy',
                 Z=True, X=False, learning_rate=0.002,
                 normcentererr_p=None, batchnorm=0):
    in_dim = L**2 * (X+Z)
    out_dim = 2*L**2 * (X+Z)
    model = Sequential()
    model.add(Dense(int(hidden_sizes[0]*out_dim), input_dim=in_dim, kernel_initializer='glorot_uniform'))
    if batchnorm:
        model.add(BatchNormalization(momentum=batchnorm))
    model.add(Activation(hidden_act))
    for s in hidden_sizes[1:]:
        model.add(Dense(int(s*out_dim), kernel_initializer='glorot_uniform'))
        if batchnorm:
            model.add(BatchNormalization(momentum=batchnorm))
        model.add(Activation(hidden_act))
    model.add(Dense(out_dim, kernel_initializer='glorot_uniform'))
    if batchnorm:
        model.add(BatchNormalization(momentum=batchnorm))
    model.add(Activation(act))
    c = CodeCosts(L, ToricCode, Z, X, normcentererr_p)
    losses = {'e_binary_crossentropy':c.e_binary_crossentropy,
              's_binary_crossentropy':c.s_binary_crossentropy,
              'se_binary_crossentropy':c.se_binary_crossentropy}
    model.compile(loss=losses.get(loss,loss),
                  optimizer=Nadam(lr=learning_rate),
                  metrics=[c.triv_no_error, c.e_binary_crossentropy, c.s_binary_crossentropy]
                 )
    return model
Project: keras    Author: NVIDIA    | Project source | File source
def test_nadam():
    _test_optimizer(Nadam())
Project: Question-Answering-NNs    Author: nbogdan    | Project source | File source
def __init__(self, word_index, embedding_matrix):
        embedding_layer_c = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH_C,
                                    trainable=False)
        embedding_layer_q = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_Q,
                                      trainable=False)
        embedding_layer_a = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_A,
                                      trainable=False)
        context = Input(shape=(MAX_SEQUENCE_LENGTH_C,), dtype='int32', name='context')
        question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
        answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
        embedded_context = embedding_layer_c(context)
        embedded_question = embedding_layer_q(question)
        embedded_answer = embedding_layer_a(answer)

        l_lstm_c = Bidirectional(LSTM(60))(embedded_context)
        l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
        l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)

        concat_c_q = concatenate([l_lstm_q, l_lstm_c], axis=1)
        relu_c_q = Dense(100, activation='relu')(concat_c_q)
        relu_c_q = Dropout(0.25)(relu_c_q)
        concat_c_q_a = concatenate([l_lstm_a, relu_c_q], axis=1)

        softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
        self.model = Model([question, answer, context], softmax_c_q_a)
        opt = Nadam()
        self.model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['acc'])
Project: Question-Answering-NNs    Author: nbogdan    | Project source | File source
def __init__(self, word_index, embedding_matrix):
        embedding_layer_c = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH_C,
                                    trainable=False)
        embedding_layer_q = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_Q,
                                      trainable=False)
        embedding_layer_a = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_A,
                                      trainable=False)
        context = Input(shape=(MAX_SEQUENCE_LENGTH_C,), dtype='int32', name='context')
        question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
        answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
        embedded_context = embedding_layer_c(context)
        embedded_question = embedding_layer_q(question)
        embedded_answer = embedding_layer_a(answer)

        l_lstm_c = Bidirectional(LSTM(60, return_sequences=True))(embedded_context)
        l_lstm_c = Bidirectional(LSTM(60))(l_lstm_c)
        l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
        l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)

        concat_c_q = concatenate([l_lstm_q, l_lstm_c], axis=1)
        relu_c_q = Dense(100, activation='relu')(concat_c_q)
        relu_c_q = Dropout(0.25)(relu_c_q)
        concat_c_q_a = concatenate([l_lstm_a, relu_c_q], axis=1)

        softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
        self.model = Model([question, answer, context], softmax_c_q_a)
        opt = Nadam()
        self.model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['acc'])
Project: Question-Answering-NNs    Author: nbogdan    | Project source | File source
def __init__(self, word_index, embedding_matrix):
        embedding_layer_q = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_Q,
                                      trainable=False)
        embedding_layer_a = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_A,
                                      trainable=False)
        question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
        answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
        embedded_question = embedding_layer_q(question)
        embedded_answer = embedding_layer_a(answer)

        l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
        l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)

        concat_c_q_a = concatenate([l_lstm_a, l_lstm_q], axis=1)

        softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
        self.model = Model([question, answer], softmax_c_q_a)
        opt = Nadam()
        self.model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['acc'])
Project: Question-Answering-NNs    Author: nbogdan    | Project source | File source
def __init__(self, word_index, embedding_matrix):
        embedding_layer_c = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH_C,
                                    trainable=False)
        embedding_layer_q = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_Q,
                                      trainable=False)
        embedding_layer_a = Embedding(len(word_index) + 1,
                                      EMBEDDING_DIM,
                                      weights=[embedding_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH_A,
                                      trainable=False)
        context = Input(shape=(MAX_SEQUENCE_LENGTH_C,), dtype='int32', name='context')
        question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
        answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
        embedded_context = embedding_layer_c(context)
        embedded_question = embedding_layer_q(question)
        embedded_answer = embedding_layer_a(answer)

        l_lstm_c = Bidirectional(LSTM(60))(embedded_context)
        l_lstm_q = Bidirectional(LSTM(60))(embedded_question)
        l_lstm_a = Bidirectional(LSTM(60))(embedded_answer)

        concat_c_q = concatenate([l_lstm_q, l_lstm_c], axis=1)
        relu_c_q = Dense(100, activation='tanh')(concat_c_q)
        concat_c_a = concatenate([l_lstm_a, l_lstm_c], axis=1)
        relu_c_a = Dense(100, activation='tanh')(concat_c_a)
        relu_c_q = Dropout(0.5)(relu_c_q)
        relu_c_a = Dropout(0.5)(relu_c_a)
        concat_c_q_a = merge([relu_c_a, relu_c_q], mode='cos')
        softmax_c_q_a = Dense(2, activation='softmax')(concat_c_q_a)
        self.model = Model([question, answer, context], softmax_c_q_a)
        opt = Nadam()
        self.model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['acc'])
Project: kaggle-quora-solution-8th    Author: qqgeogor    | Project source | File source
def MLP(opt='nadam'):

    X_raw=Input(shape=(LEN_RAW_INPUT,),name='input_raw')

    fc1=BatchNormalization()(X_raw)
    fc1=Dense(256)(fc1)
    fc1=PReLU()(fc1)
    fc1=Dropout(0.2)(fc1)

    fc1=BatchNormalization()(fc1)
    fc1=Dense(256)(fc1)
    fc1=PReLU()(fc1)
    #fc1=Dropout(0.2)(fc1)

    fc1=BatchNormalization()(fc1)
    auxiliary_output_dense = Dense(1, activation='sigmoid', name='aux_output_dense')(fc1)


    output_all = Dense(1,activation='sigmoid',name='output')(fc1)
    model=Model(input=X_raw,output=output_all)
    model.compile(
                optimizer=opt,
                loss = 'binary_crossentropy')
    return model


#nadam=Nadam(lr=0.000)
Project: mlbootcamp_5    Author: ivan-filonov    | Project source | File source
def build_keras_model(self):
        input_ = layers.Input(shape=(self.input_dims_,))
        #model = layers.noise.GaussianNoise(0.005)(input_)
        model = layers.Dense(512, kernel_initializer='Orthogonal')(input_)
        model = layers.Activation('selu')(model)
        #model = layers.noise.AlphaDropout(0.1, seed=1)(model)
        #model = layers.BatchNormalization()(model)
        #model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.2)(model)

        model = layers.Dense(64, kernel_initializer='Orthogonal')(model)
        model = layers.Activation('selu')(model)
        #model = layers.noise.AlphaDropout(0.1, seed=1)(model)
        #model = layers.BatchNormalization()(model)
        #model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.4)(model)

        model = layers.Dense(16, kernel_initializer='Orthogonal')(model)
        #model = layers.BatchNormalization()(model)
        #model = layers.advanced_activations.PReLU()(model)
        model = layers.Activation('selu')(model)

        model = layers.Dense(1, activation='sigmoid')(model)

        model = models.Model(input_, model)
        model.compile(loss = 'binary_crossentropy', optimizer = optimizers.Nadam())
        #print(model.summary(line_length=120))
        return model
Project: mlbootcamp_5    Author: ivan-filonov    | Project source | File source
def build_keras_model(self):
        ''' #example:
        from keras import layers
        from keras import models
        from keras import optimizers
        input_ = layers.Input(shape=(self.input_dims_,))
        model = layers.Dense(256, kernel_initializer='Orthogonal')(input_)
        #model = layers.BatchNormalization()(model)
        model = layers.Activation('selu')(model)
        #model = layers.noise.AlphaDropout(0.2, seed=1)(model)
        #model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.4)(model)

        model = layers.Dense(64, kernel_initializer='Orthogonal')(model)
        #model = layers.BatchNormalization()(model)
        model = layers.Activation('selu')(model)
        #model = layers.noise.AlphaDropout(0.1, seed=1)(model)
        #model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.4)(model)

        model = layers.Dense(16, kernel_initializer='Orthogonal')(model)
        #model = layers.BatchNormalization()(model)
        model = layers.Activation('selu')(model)
        #model = layers.advanced_activations.PReLU()(model)

        model = layers.Dense(1, activation='sigmoid')(model)

        model = models.Model(input_, model)
        model.compile(loss = 'binary_crossentropy', optimizer = optimizers.Nadam())
        #print(model.summary(line_length=120))
        return model
        '''
        raise Exception('implement this!')

    #@tf_force_cpu
Project: mlbootcamp_5    Author: ivan-filonov    | Project source | File source
def build_keras_model(self):
        input_ = layers.Input(shape=(self.input_dims_,))
        #model = layers.noise.GaussianNoise(0.005)(input_)
        model = layers.Dense(256, kernel_initializer='Orthogonal')(input_)
        model = layers.Activation('selu')(model)
        #model = layers.noise.AlphaDropout(0.1, seed=1)(model)
        #model = layers.BatchNormalization()(model)
        #model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.2)(model)

        model = layers.Dense(64, kernel_initializer='Orthogonal')(model)
        model = layers.Activation('selu')(model)
        #model = layers.noise.AlphaDropout(0.1, seed=1)(model)
        #model = layers.BatchNormalization()(model)
        #model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.4)(model)

        model = layers.Dense(16, kernel_initializer='Orthogonal')(model)
        #model = layers.BatchNormalization()(model)
        #model = layers.advanced_activations.PReLU()(model)
        model = layers.Activation('selu')(model)

        model = layers.Dense(1, activation='sigmoid')(model)

        model = models.Model(input_, model)
        model.compile(loss = 'binary_crossentropy', optimizer = optimizers.Nadam())
        #print(model.summary(line_length=120))
        return model
Project: mlbootcamp_5    Author: ivan-filonov    | Project source | File source
def build_keras_model(self):
        input_ = layers.Input(shape=(self.input_dims_,))
        model = layers.noise.GaussianNoise(0.005)(input_)
        model = layers.Dense(1024, kernel_initializer='Orthogonal')(model)
        #model = layers.Activation('selu')(model)
        #model = layers.noise.AlphaDropout(0.1, seed=1)(model)
        model = layers.BatchNormalization()(model)
        model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.2)(model)

        model = layers.Dense(512, kernel_initializer='Orthogonal')(model)
        #model = layers.Activation('selu')(model)
        #model = layers.noise.AlphaDropout(0.1, seed=1)(model)
        model = layers.BatchNormalization()(model)
        model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.4)(model)

        model = layers.Dense(16, kernel_initializer='Orthogonal')(model)
        model = layers.BatchNormalization()(model)
        model = layers.advanced_activations.PReLU()(model)
        #model = layers.Activation('selu')(model)

        model = layers.Dense(1, activation='sigmoid')(model)

        model = models.Model(input_, model)
        model.compile(loss = 'binary_crossentropy', optimizer = optimizers.Nadam())
        #print(model.summary(line_length=120))
        return model
Project: mlbootcamp_5    Author: ivan-filonov    | Project source | File source
def keras_mlp1(train2, y, test2, v, z):
    from keras import layers
    from keras import models
    from keras import optimizers
    cname = sys._getframe().f_code.co_name
    num_splits = 9
    scaler = preprocessing.RobustScaler()
    train3 = scaler.fit_transform(train2)
    test3 = scaler.transform(test2)
    input_dims = train3.shape[1]
    def build_model():
        input_ = layers.Input(shape=(input_dims,))
        model = layers.Dense(256, kernel_initializer='Orthogonal')(input_)
        #model = layers.BatchNormalization()(model)
        #model = layers.advanced_activations.PReLU()(model)
        model = layers.Activation('selu')(model)
        #model = layers.Dropout(0.7)(model)

        model = layers.Dense(64, kernel_initializer='Orthogonal')(model)
        #model = layers.BatchNormalization()(model)
        model = layers.Activation('selu')(model)
        #model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.9)(model)

        model = layers.Dense(16, kernel_initializer='Orthogonal')(model)
        #model = layers.BatchNormalization()(model)
        model = layers.Activation('selu')(model)
        #model = layers.advanced_activations.PReLU()(model)

        model = layers.Dense(1, activation='sigmoid')(model)

        model = models.Model(input_, model)
        model.compile(loss = 'binary_crossentropy', optimizer = optimizers.Nadam())
        #print(model.summary(line_length=120))
        return model
    keras_common(train3, y, test3, v, z, num_splits, cname, build_model)
Project: mlbootcamp_5    Author: ivan-filonov    | Project source | File source
def keras_mlp1(train2, y, test2, v, z):
    cname = sys._getframe().f_code.co_name
    def build_model(input_dims):
        from keras import layers
        from keras import models
        from keras import optimizers
        input_ = layers.Input(shape=(input_dims,))
        model = layers.Dense(1024, kernel_initializer='Orthogonal')(input_)
        model = layers.BatchNormalization()(model)
        model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.7)(model)
        model = layers.Dense(256, kernel_initializer='Orthogonal')(model)
        model = layers.BatchNormalization()(model)
        model = layers.advanced_activations.PReLU()(model)
        #model = layers.Dropout(0.9)(model)
        model = layers.Dense(64, kernel_initializer='Orthogonal')(model)
        model = layers.BatchNormalization()(model)
        model = layers.advanced_activations.PReLU()(model)
        model = layers.Dense(1, activation='sigmoid')(model)
        model = models.Model(input_, model)
        model.compile(loss = 'binary_crossentropy',
                      optimizer = optimizers.Nadam(),
                      #optimizer = optimizers.SGD(),
                      metrics = ['binary_accuracy'])
        #print(model.summary(line_length=120))
        return model
    keras_base(train2, y, test2, v, z, build_model, 9, cname, base_seed=42)

#@tf_force_cpu
Project: mlbootcamp_5    Author: ivan-filonov    | Project source | File source
def keras_resnet1(train2, y, test2, v, z):
    cname = sys._getframe().f_code.co_name
    def build_model(input_dims):
        from keras import layers
        from keras import models
        from keras import optimizers
        input_ = layers.Input(shape=(input_dims,))
        resnet_dims = max(input_dims * 2, 128)
        model = layers.Dense(resnet_dims,
                             kernel_initializer='Orthogonal',
                             activation=layers.advanced_activations.PReLU())(input_)
        model = layers.BatchNormalization()(model)

        for n in range(20):
            shortcut = model
            model = layers.Dense(resnet_dims,
                                 kernel_initializer='Orthogonal')(model)
            model = layers.BatchNormalization()(model)
            model = layers.advanced_activations.PReLU()(model)
            model = layers.Dense(resnet_dims,
                                 kernel_initializer='Orthogonal')(model)
            model = layers.BatchNormalization()(model)
            model = layers.add([model, shortcut])
            model = layers.advanced_activations.PReLU()(model)

        #model = layers.Dropout(0.9)(model)
        model = layers.Dense(16,
                             kernel_initializer='Orthogonal',
                             activation=layers.advanced_activations.PReLU())(model)
        model = layers.BatchNormalization()(model)
        model = layers.Dense(1,
                             activation='sigmoid')(model)
        model = models.Model(input_, model)
        model.compile(loss = 'binary_crossentropy',
                      optimizer = optimizers.Nadam(),
                      #optimizer = optimizers.SGD(),
                      metrics = ['binary_accuracy'])
        #print(model.summary(line_length=120))
        return model
    keras_base(train2, y, test2, v, z, build_model, 9, cname, base_seed=42)
Project: Kaggler    Author: qqgeogor    | Project source | File source
def build_model(X,dim=128):

    inputs_p = Input(shape=(1,), dtype='int32')

    embed_p = Embedding(
                    num_q,
                    dim,
                    dropout=0.2,
                    input_length=1
                    )(inputs_p)

    inputs_d = Input(shape=(1,), dtype='int32')

    embed_d = Embedding(
                    num_e,
                    dim,
                    dropout=0.2,
                    input_length=1
                    )(inputs_d)


    flatten_p = Flatten()(embed_p)

    flatten_d = Flatten()(embed_d)

    flatten = merge([
                flatten_p,
                flatten_d,
                ],mode='concat')

    fc1 = Dense(512)(flatten)
    fc1 = SReLU()(fc1)
    dp1 = Dropout(0.7)(fc1)

    outputs = Dense(1,activation='sigmoid',name='outputs')(dp1)

    inputs = [
                inputs_p,
                inputs_d,
            ]



    model = Model(input=inputs, output=outputs)
    nadam = Nadam()
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(
                optimizer=nadam,
                loss= 'binary_crossentropy'
              )

    return model
Project: StockRecommendSystem    Author: doncat99    | Project source | File source
def lstm_model(self):
        model = Sequential()
        first = True
        for idx in range(len(self.paras.model['hidden_layers'])):
            if idx == (len(self.paras.model['hidden_layers']) - 1):
                model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=False))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))
            elif first:
                model.add(LSTM(input_shape=(None, int(self.paras.n_features)),
                               units=int(self.paras.model['hidden_layers'][idx]),
                               return_sequences=True))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))
                first = False
            else:
                model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=True))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))

        if self.paras.model['optimizer'] == 'sgd':
            #optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
            optimizer = optimizers.SGD(lr=self.paras.model['learning_rate'], decay=1e-6, momentum=0.9, nesterov=True)
        elif self.paras.model['optimizer'] == 'rmsprop':
            #optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.RMSprop(lr=self.paras.model['learning_rate']/10, rho=0.9, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adagrad':
            #optimizer = optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.Adagrad(lr=self.paras.model['learning_rate'], epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adam':
            #optimizer = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.Adam(lr=self.paras.model['learning_rate']/10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adadelta':
            optimizer = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adamax':
            optimizer = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'nadam':
            optimizer = optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
        else:
            optimizer = optimizers.Adam(lr=self.paras.model['learning_rate']/10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        # output layer
        model.add(Dense(units=self.paras.model['out_layer']))
        model.add(Activation(self.paras.model['out_activation']))
        model.compile(loss=self.paras.model['loss'], optimizer=optimizer, metrics=['accuracy'])

        return model
Project: job-salary-prediction    Author: soton-data-mining    | Project source | File source
def predict(self):

        def get_weights(model, layer_id):
            layer = model.layers[layer_id]
            weights = layer.get_weights()
            # for a Dense layer, get_weights() returns [kernel, bias]; index 1 is the bias vector
            firstWeights = weights[1]
            print(firstWeights)

        def export_model(model, name):
            if not (os.path.exists("neural_net_models")):
                os.makedirs("neural_net_models")
            model_json = model.to_json()
            with open("neural_net_models/" + name + ".json", "w") as json_file:
                json_file.write(model_json)
            # serialize weights to HDF5
            model.save_weights("neural_net_models/" + name + ".h5")

        def import_model(model_name):
            json_file = open("neural_net_models/" + model_name + '.json', 'r')
            loaded_model_json = json_file.read()
            json_file.close()
            model = model_from_json(loaded_model_json)
            # load weights into new model
            model.load_weights("neural_net_models/" + model_name + ".h5")
            print("Loaded " + model_name + " from disk")
            return model

        model = import_model('ut_Dense100_L1_m5s3_L2_m1s03_lr07_d1e07')
        """
        model = Sequential()
        model.add(Dense(100, input_dim=85, activation='relu',
                        kernel_initializer=initializers.RandomNormal(
                                mean=5, stddev=3, seed=None)))
        model.add(Dense(1, activation='linear',
                        kernel_initializer=initializers.RandomNormal(
                                mean=1, stddev=0.3, seed=None)))
        """
        # rms = opt.RMSprop(lr=0.01, rho=0.9, epsilon=1e-08, decay =1e-9)
        adadelta = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
        # nadam = opt.Nadam(lr=0.05, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
        model.compile(loss='mean_absolute_error', optimizer=adadelta, metrics=[metrics.mae])
        # optimizer='adam'
        model.fit(
                self.x_train, self.y_train,
                validation_data=(self.x_test, self.y_test),
                epochs=1000, batch_size=160000, verbose=1
        )

        export_model(model, 'ut_Dense100_L1_m5s3_L2_m1s03_lr07_d1e07')
        return (self.y_train, self.y_test)
Project: Question-Answering-NNs    Author: nbogdan    | Project source | File source
def __init__(self, word_index, embedding_matrix):
        embedding_layer_q = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH_Q,
                                    trainable=False)
        embedding_layer_a = Embedding(len(word_index) + 1,
                                    EMBEDDING_DIM,
                                    weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH_A,
                                    trainable=False)

        question = Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32', name='question')
        answer = Input(shape=(MAX_SEQUENCE_LENGTH_A,), dtype='int32', name='answer')
        embedded_question = embedding_layer_q(question)
        embedded_answer = embedding_layer_a(answer)

        conv_blocksA = []
        conv_blocksQ = []
        for sz in [3, 5]:
            conv = Convolution1D(filters=20,
                                 kernel_size=sz,
                                 padding="valid",
                                 activation="relu",
                                 strides=1)(embedded_answer)
            conv = MaxPooling1D(pool_size=2)(conv)
            conv = Flatten()(conv)
            conv_blocksA.append(conv)
        for sz in [5, 7, 9]:
            conv = Convolution1D(filters=20,
                                 kernel_size=sz,
                                 padding="valid",
                                 activation="relu",
                                 strides=1)(embedded_question)
            conv = MaxPooling1D(pool_size=3)(conv)
            conv = Flatten()(conv)
            conv_blocksQ.append(conv)

        z = Concatenate()(conv_blocksA + conv_blocksQ)
        z = Dropout(0.5)(z)
        z = Dense(100, activation="relu")(z)
        softmax_c_q = Dense(2, activation='softmax')(z)
        self.model = Model([question, answer], softmax_c_q)
        opt = Nadam()
        self.model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['acc'])