Python keras.optimizers module: Adam() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.optimizers.Adam().
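All of the examples follow the same basic pattern: build a model, instantiate Adam (optionally overriding its defaults), and hand it to model.compile(). As a minimal, self-contained sketch of that pattern (the layer sizes are placeholders; note that these Keras 1/2-era snippets use the lr argument, which later Keras releases renamed to learning_rate):

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

# A toy classifier, compiled with Adam.
model = Sequential()
model.add(Dense(64, input_dim=100, activation='relu'))
model.add(Dense(10, activation='softmax'))

# Adam's defaults in this era were lr=0.001, beta_1=0.9, beta_2=0.999,
# epsilon=1e-8; any of them can be overridden, as can clipnorm/clipvalue.
model.compile(optimizer=Adam(lr=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])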

Project: keras-fractalnet    Author: snf
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = RMSprop(lr=LEARN_START)
    #optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png')
    return model
Project: namegenderclassifier    Author: joaoalvarenga
def train(self, dataset, train_split=0.8, dense_size=32, learning_rate=0.001, batch_size=32, epochs=50, activation='relu'):
        self.__load_dataset(dataset, train_split)

        train_x = np.array(self.__train_data[:, 0].tolist())
        train_y = to_categorical(self.__train_data[:, 1], 2)

        test_x = np.array(self.__test_data[:, 0].tolist())
        test_y = to_categorical(self.__test_data[:, 1], 2)

        print(train_x.shape)
        self.__model = Sequential()
        self.__model.add(Dense(dense_size, input_dim=train_x.shape[1], activation=activation, init='glorot_uniform'))
        self.__model.add(Dense(train_y.shape[1], activation='softmax', init='glorot_uniform'))
        self.__model.compile(optimizer=Adam(lr=learning_rate), loss='categorical_crossentropy', metrics=['categorical_accuracy'])

        self.__model.fit(train_x, train_y, batch_size=batch_size, nb_epoch=epochs, validation_data=(test_x, test_y), verbose=2)
Project: keras-fractalnet    Author: snf
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32) if K._BACKEND == 'theano' else (32, 32, 3))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM, nesterov=True)
    optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png', show_shapes=True)
    return model
Project: segmentation_DLMI    Author: imatge-upc
def compile_scae(model, lr=None):
        '''
        Compile the model
        '''

        # Optimizer values
        lr = 0.02 if lr is None else lr
        beta_1 = 0.9
        beta_2 = 0.999
        epsilon = 10 ** (-8)
        optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, clipnorm=1.)

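        # The identity loss below returns y_pred itself, so the network is
        # expected to compute its own loss value as its output.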
        model.compile(
            optimizer=optimizer,
            loss=[lambda y_true, y_pred: y_pred],
        )

        return model
Project: AutoSleepScorerDev    Author: skjerns
def largeann(input_shape, n_classes, layers=3, neurons=2000, dropout=0.35):
    """
    for working with extracted features
    """
#    gpu = switch_gpu()
#    with K.tf.device('/gpu:{}'.format(gpu)):
#        K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)))
    model = Sequential(name='ann')
#    model.gpu = gpu
    for l in range(layers):
        model.add(Dense (neurons, input_shape=input_shape, activation='elu', kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[keras.metrics.categorical_accuracy])
    return model
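A hypothetical call to the function above, assuming 10 extracted features per sample and 5 target classes (both values are illustrative):

# Hypothetical usage of largeann; shapes are placeholders.
model = largeann(input_shape=(10,), n_classes=5)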

#%% everything recurrent for ANN
Project: deeppavlov    Author: deepmipt
def _init_from_saved(self, fname):

        with open(fname + '_opt.json', 'r') as opt_file:
            self.opt = json.load(opt_file)

        if self.model_type == 'nn':
            if self.model_name == 'cnn_word':
                self.model = self.cnn_word_model()
            if self.model_name == 'lstm_word':
                self.model = self.lstm_word_model()

            optimizer = Adam(lr=self.opt['learning_rate'], decay=self.opt['learning_decay'])
            self.model.compile(loss='binary_crossentropy',
                               optimizer=optimizer,
                               metrics=['binary_accuracy'])
            print('[ Loading model weights %s ]' % fname)
            self.model.load_weights(fname + '.h5')

        if self.model_type == 'ngrams':
            with open(fname + '_cls.pkl', 'rb') as model_file:
                self.model = pickle.load(model_file)
            print('CLS:', self.model)
Project: deeppavlov    Author: deepmipt
def getOptimizer(optim, exp_decay, grad_norm_clip, lr = 0.001):
    """Function for setting up optimizer, combines several presets from
    published well performing models on SQuAD."""

    optimizers = {
        'Adam': Adam(lr=lr, decay=exp_decay, clipnorm=grad_norm_clip),
        'Adamax': Adamax(lr=lr, decay=exp_decay, clipnorm=grad_norm_clip),
        'Adadelta': Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, decay=exp_decay, clipnorm=grad_norm_clip)
    }

    try:
        optimizer = optimizers[optim]
    except KeyError as e:
        raise ValueError('problems with defining optimizer: {}'.format(e.args[0]))

    del (optimizers)
    return optimizer
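
A hypothetical invocation of this helper (the argument values are illustrative only):

# e.g. Adam with exponential decay and gradient-norm clipping:
optimizer = getOptimizer('Adam', exp_decay=1e-6, grad_norm_clip=5.0, lr=0.001)
# An unlisted name such as getOptimizer('SGD', 1e-6, 5.0) raises ValueError.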

# ------------------------------------------------------------------------------
# Data/model utilities.
# ------------------------------------------------------------------------------
Project: deeppavlov    Author: deepmipt
def _init_from_scratch(self):
        """Initialize a model from scratch."""

        if self.model_name == 'bmwacor':
            self.model = self.bmwacor_model()
        if self.model_name == 'bilstm_split':
            self.model = self.bilstm_split_model()
        if self.model_name == 'full_match':
            self.model = self.full_match_model()
        if self.model_name == 'maxpool_match':
            self.model = self.maxpool_match_model()
        if self.model_name == 'att_match':
            self.model = self.att_match_model()
        if self.model_name == 'maxatt_match':
            self.model = self.maxatt_match_model()
        if self.model_name == 'bilstm_woatt':
            self.model = self.bilstm_woatt_model()
        optimizer = Adam(lr=self.learning_rate)
        self.model.compile(loss='binary_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy', fbeta_score])
Project: KATE    Author: hugochan
def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, feature_weights=None):
        print 'Training autoencoder'
        optimizer = Adadelta(lr=1.5)
        # optimizer = Adam()
        # optimizer = Adagrad()
        if feature_weights is None:
            self.autoencoder.compile(optimizer=optimizer, loss='binary_crossentropy') # kld, binary_crossentropy, mse
        else:
            print 'Using weighted loss'
            self.autoencoder.compile(optimizer=optimizer, loss=weighted_binary_crossentropy(feature_weights)) # kld, binary_crossentropy, mse

        self.autoencoder.fit(train_X[0], train_X[1],
                        nb_epoch=nb_epoch,
                        batch_size=batch_size,
                        shuffle=True,
                        validation_data=(val_X[0], val_X[1]),
                        callbacks=[
                                    ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                                    EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                                    # ModelCheckpoint(self.model_save_path, monitor='val_loss', save_best_only=True, verbose=0),
                        ]
                        )

        return self
Project: Sentiment-Analysis    Author: jasonwu0731
def __init__(self, n_classes, vocab_size, max_len, num_units=128,
                 useBiDirection=False, useAttention=False, learning_rate=0.001, dropout=0, embedding_size=300):
        self.model = Sequential()
        self.model.add(Embedding(input_dim=vocab_size,
                                 output_dim=embedding_size, input_length=max_len))
        lstm_model = LSTM(num_units, dropout=dropout)
        if useBiDirection:
            lstm_model = Bidirectional(lstm_model)
        if useAttention:
            lstm_model = lstm_model
            print("Attention not implement yet ... ")
        self.model.add(lstm_model)
        self.model.add(Dense(n_classes, activation='softmax'))

        self.model.summary()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=Adam(lr=learning_rate),
                           metrics=['accuracy'])
Project: keras-image-captioning    Author: danieljl
def build(self, vocabs=None):
        if self._keras_model:
            return
        if vocabs is None and self._word_vector_init is not None:
            raise ValueError('If word_vector_init is not None, build method '
                             'must be called with vocabs that are not None!')

        image_input, image_embedding = self._build_image_embedding()
        sentence_input, word_embedding = self._build_word_embedding(vocabs)
        sequence_input = Concatenate(axis=1)([image_embedding, word_embedding])
        sequence_output = self._build_sequence_model(sequence_input)

        model = Model(inputs=[image_input, sentence_input],
                      outputs=sequence_output)
        model.compile(optimizer=Adam(lr=self._learning_rate, clipnorm=5.0),
                      loss=categorical_crossentropy_from_logits,
                      metrics=[categorical_accuracy_with_variable_timestep])

        self._keras_model = model
Project: latplan    Author: guicho271828
def report(self,train_data,
               epoch=200,batch_size=1000,optimizer=Adam(0.001),
               test_data=None,
               train_data_to=None,
               test_data_to=None,):
        test_data     = train_data if test_data is None else test_data
        train_data_to = train_data if train_data_to is None else train_data_to
        test_data_to  = test_data  if test_data_to is None else test_data_to
        opts = {'verbose':0,'batch_size':batch_size}
        def test_both(msg, fn):
            print(msg.format(fn(train_data)))
            if test_data is not None:
                print((msg+" (validation)").format(fn(test_data)))
        self.autoencoder.compile(optimizer=optimizer, loss=bce)
        test_both("Reconstruction BCE: {}",
                  lambda data: self.autoencoder.evaluate(data,data,**opts))
        return self
Project: keras-tf-Super-Resolution    Author: olgaliak
def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """
            Creates a model to be used to scale images of specific height and width.
        """
        init = super(ImageSuperResolutionModel, self).create_model(height, width, channels, load_weights, batch_size)

        x = Convolution2D(self.n1, self.f1, self.f1, activation='relu', border_mode='same', name='level1')(init)
        x = Convolution2D(self.n2, self.f2, self.f2, activation='relu', border_mode='same', name='level2')(x)

        out = Convolution2D(channels, self.f3, self.f3, border_mode='same', name='output')(x)

        model = Model(init, out)

        adam = optimizers.Adam(lr=1e-3)
        model.compile(optimizer=adam, loss='mse', metrics=[PSNRLoss])
        if load_weights: model.load_weights(self.weight_path)

        self.model = model
        return model
Project: keras-tf-Super-Resolution    Author: olgaliak
def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """
            Creates a model to be used to scale images of specific height and width.
        """
        init = super(ExpantionSuperResolution, self).create_model(height, width, channels, load_weights, batch_size)

        x = Convolution2D(self.n1, self.f1, self.f1, activation='relu', border_mode='same', name='level1')(init)

        x1 = Convolution2D(self.n2, self.f2_1, self.f2_1, activation='relu', border_mode='same', name='lavel1_1')(x)
        x2 = Convolution2D(self.n2, self.f2_2, self.f2_2, activation='relu', border_mode='same', name='lavel1_2')(x)
        x3 = Convolution2D(self.n2, self.f2_3, self.f2_3, activation='relu', border_mode='same', name='lavel1_3')(x)

        x = merge([x1, x2, x3], mode='ave')

        out = Convolution2D(channels, self.f3, self.f3, activation='relu', border_mode='same', name='output')(x)

        model = Model(init, out)
        adam = optimizers.Adam(lr=1e-3)
        model.compile(optimizer=adam, loss='mse', metrics=[PSNRLoss])
        if load_weights: model.load_weights(self.weight_path)

        self.model = model
        return model
Project: CCIR    Author: xiaogang00
def get_graph(num_users, num_items, latent_dim):

    model = Graph()
    model.add_input(name='user_input', input_shape=(num_users,))
    model.add_input(name='positive_item_input', input_shape=(num_items,))
    model.add_input(name='negative_item_input', input_shape=(num_items,))

    model.add_node(layer=Dense(latent_dim, input_shape = (num_users,)),
                   name='user_latent',
                   input='user_input')
    model.add_shared_node(layer=Dense(latent_dim, input_shape = (num_items,)), 
                          name='item_latent', 
                          inputs=['positive_item_input', 'negative_item_input'],
                          merge_mode=None, 
                          outputs=['positive_item_latent', 'negative_item_latent'])

    model.add_node(layer=Activation('linear'), name='user_pos', inputs=['user_latent', 'positive_item_latent'], merge_mode='dot', dot_axes=1)
    model.add_node(layer=Activation('linear'), name='user_neg', inputs=['user_latent', 'negative_item_latent'], merge_mode='dot', dot_axes=1)

    model.add_output(name='triplet_loss_out', inputs=['user_pos', 'user_neg'])
    model.compile(loss={'triplet_loss_out': ranking_loss}, optimizer=Adam())  # alternative: Adagrad(lr=0.1, epsilon=1e-06)

    return model
Project: CCIR    Author: xiaogang00
def cnn(height, width):
    question_input = Input(shape=(height, width, 1), name='question_input')
    conv1_Q = Conv2D(512, (2, 320), activation='sigmoid', padding='valid',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(question_input)
    Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
    F1_Q = Flatten()(Max1_Q)
    Drop1_Q = Dropout(0.25)(F1_Q)
    predictQ = Dense(32, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(Drop1_Q)
    prediction2 = Dropout(0.25)(predictQ)
    predictions = Dense(1, activation='relu')(prediction2)
    model = Model(inputs=[question_input],
                  outputs=predictions)

    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0))
    # model.compile(loss='mean_squared_error',
    #             optimizer='nadam')
    return model
Project: importance-sampling    Author: idiap
def build_model(model, wrapper, dataset, hyperparams, reweighting):
    def build_optimizer(opt, hyperparams):
        return {
            "sgd": SGD(
                lr=hyperparams.get("lr", 0.001),
                momentum=hyperparams.get("momentum", 0.0)
            ),
            "adam": Adam(lr=hyperparams.get("lr", 0.001))
        }[opt]

    model = models.get(model)(dataset.shape, dataset.output_size)
    model.compile(
        optimizer=build_optimizer(
            hyperparams.get("opt", "adam"),
            hyperparams
        ),
        loss=model.loss,
        metrics=model.metrics
    )

    return get_models_dictionary(hyperparams, reweighting)[wrapper](model)
Project: KAGGLE_CERVICAL_CANCER_2017    Author: ZFTurbo
def VGG_16_KERAS(classes_number, optim_name='Adam', learning_rate=-1):
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.vgg16 import VGG16
    from keras.models import Model

    base_model = VGG16(include_top=True, weights='imagenet')
    x = base_model.layers[-2].output
    del base_model.layers[-1:]
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    vgg16 = Model(input=base_model.input, output=x)

    optim = get_optim('VGG16_KERAS', optim_name, learning_rate)
    vgg16.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    # print(vgg16.summary())
    return vgg16


# MIN: 1.00 Fast: 60 sec
Project: KAGGLE_CERVICAL_CANCER_2017    Author: ZFTurbo
def VGG_16_2_v2(classes_number, optim_name='Adam', learning_rate=-1):
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.vgg16 import VGG16
    from keras.models import Model
    from keras.layers import Input

    input_tensor = Input(shape=(3, 224, 224))
    base_model = VGG16(input_tensor=input_tensor, include_top=False, weights='imagenet')
    x = base_model.output
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    vgg16 = Model(input=base_model.input, output=x)

    optim = get_optim('VGG16_KERAS', optim_name, learning_rate)
    vgg16.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    # print(vgg16.summary())
    return vgg16
Project: KAGGLE_CERVICAL_CANCER_2017    Author: ZFTurbo
def Xception_wrapper(classes_number, optim_name='Adam', learning_rate=-1):
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.xception import Xception
    from keras.models import Model

    # Only tensorflow
    base_model = Xception(include_top=True, weights='imagenet')
    x = base_model.layers[-2].output
    del base_model.layers[-1:]
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    model = Model(input=base_model.input, output=x)

    optim = get_optim('Xception_wrapper', optim_name, learning_rate)
    model.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    print(model.summary())
    return model
Project: strategy    Author: kanghua309
def build_model(self):
        model = Sequential()
        model.add(Dense(self.hidden1, input_dim=self.state_size, activation='relu', kernel_initializer='glorot_uniform'))
        model.add(Dense(self.hidden2, activation='relu', kernel_initializer='glorot_uniform'))
        model.add(Dense(self.action_size, activation='softmax', kernel_initializer='glorot_uniform'))
        model.summary()
        # Using categorical crossentropy as a loss is a trick to easily
        # implement the policy gradient. Categorical cross entropy is defined
        # H(p, q) = sum(p_i * log(q_i)). For the action taken, a, you set
        # p_a = advantage. q_a is the output of the policy network, which is
        # the probability of taking the action a, i.e. policy(s, a).
        # All other p_i are zero, thus we have H(p, q) = A * log(policy(s, a))
        model.compile(loss="categorical_crossentropy", optimizer=Adam(lr=self.learning_rate))
        return model

    # using the output of policy network, pick action stochastically
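To make the categorical-crossentropy trick described in the comment above concrete, here is a hedged sketch of the corresponding update step (the function and variable names are hypothetical, not part of the original project):

import numpy as np

def policy_update(model, state, action, advantage, action_size):
    # Keras' categorical crossentropy computes -sum(p_i * log(q_i)).
    # With p_a = advantage and every other p_i = 0, the loss becomes
    # -advantage * log(policy(s, a)), i.e. the policy-gradient objective.
    target = np.zeros((1, action_size))
    target[0, action] = advantage
    model.train_on_batch(state, target)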
Project: pydl    Author: rafaeltg
def get_optimizer(self):

        if self.opt == 'sgd':
            return k_opt.SGD(lr=self.learning_rate, momentum=self.momentum)

        if self.opt == 'rmsprop':
            return k_opt.RMSprop(lr=self.learning_rate)

        if self.opt == 'adagrad':
            return k_opt.Adagrad(lr=self.learning_rate)

        if self.opt == 'adadelta':
            return k_opt.Adadelta(lr=self.learning_rate)

        if self.opt == 'adam':
            return k_opt.Adam(lr=self.learning_rate)

        raise Exception('Invalid optimization function - %s' % self.opt)
Project: rogueinabox    Author: rogueinabox
def build_model(self):
        initializer = initializers.random_normal(stddev=0.02)
        model = Sequential()
        if self.padding:
            model.add(ZeroPadding2D(padding=(1, 0), data_format="channels_first", input_shape=(self.layers, self.rows, self.columns)))
        model.add(Conv2D(32, (8, 8), activation="relu", data_format="channels_first",
                         strides=(4, 4), kernel_initializer=initializer, padding='same',
                         input_shape=(self.layers, self.rows, self.columns)))
        model.add(Conv2D(64, (4, 4), activation="relu", data_format="channels_first", strides=(2, 2),
                         kernel_initializer=initializer, padding='same'))
        model.add(Conv2D(64, (3, 3), activation="relu", data_format="channels_first", strides=(1, 1),
                         kernel_initializer=initializer, padding='same'))
        model.add(Flatten())
        model.add(Dense(512, activation="relu", kernel_initializer=initializer))
        model.add(Dense(self.actions_num, kernel_initializer=initializer))

        adam = Adam(lr=1e-6)
        model.compile(loss='mse', optimizer=adam)
        return model
Project: data-science-bowl-2017    Author: tondonia
def create_model_2():
    inputs = Input((32, 32, 32, 1))

    #noise = GaussianNoise(sigma=0.1)(x)

    conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = SpatialDropout3D(0.1)(conv1)
    conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)

    x = Flatten()(pool1)
    x = Dense(64, init='normal')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(1, init='normal', activation='sigmoid')(x)

    model = Model(input=inputs, output=predictions)
    model.summary()
    optimizer = Adam(lr=1e-5)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])

    return model
Project: data-science-bowl-2017    Author: tondonia
def create_model_1():
    inputs = Input((32, 32, 32, 1))

    #noise = GaussianNoise(sigma=0.1)(x)

    conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = SpatialDropout3D(0.1)(conv1)
    conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)

    x = Flatten()(pool1)
    x = Dense(64, init='normal')(x)
    predictions = Dense(1, init='normal', activation='sigmoid')(x)

    model = Model(input=inputs, output=predictions)
    model.summary()
    optimizer = Adam(lr=1e-5)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])

    return model
Project: Snake-Game-AI    Author: elvisun
def _build_model(self):
        # Neural Net for Deep-Q learning Model
        model = Sequential()
        #model.add(Conv2D(256, kernel_size = (2,2), activation='relu', input_shape=(self.state_size.shape[0], self.state_size.shape[1],1), padding="same"))
        #model.add(Conv2D(712, kernel_size = (2,2), activation='relu', padding="same"))
        #model.add(Conv2D(128, kernel_size = (2,2), activation='relu', padding="same"))
        model.add(Dense(2048, input_dim=5, activation='relu'))  # self.state_size.shape[0] * self.state_size.shape[1]
        #model.add(Flatten())
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(512, activation='relu'))
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(128, activation='relu'))
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(32, activation='relu'))
        model.add(Dense(16, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(8, activation='relu'))
        model.add(Dense(4, activation='linear'))
        model.compile(loss='mse',
                      optimizer=Adam(lr=self.learning_rate))
        return model
Project: RIDDLE    Author: jisungk
def create_base_model(nb_features, nb_classes, learning_rate=0.02):
    model = Sequential() 

    # input layer + first hidden layer 
    model.add(Dense(512, kernel_initializer='lecun_uniform', input_shape=(nb_features,)))
    model.add(PReLU()) 
    model.add(Dropout(0.5)) 

    # additional hidden layer
    model.add(Dense(512, kernel_initializer='lecun_uniform')) 
    model.add(PReLU()) 
    model.add(Dropout(0.75)) 

    # output layer 
    model.add(Dense(nb_classes, kernel_initializer='lecun_uniform')) 
    model.add(Activation('softmax')) 

    model.compile(loss='categorical_crossentropy', 
        optimizer=Adam(lr=learning_rate), metrics=['accuracy'])  

    return model
Project: deep-learning-with-Keras    Author: decordoba
def __init__(self):
        filters1 = [16, 32, 64]  # filters1 = [4, 8, 16, 32, 64, 128, 256]
        filters2 = [16, 32, 64]  # filters2 = [4, 8, 16, 32, 64, 128, 256]
        losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]  # losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]
        optimizers1 = [optimizers.Adam()]  # optimizers1 = [optimizers.Adadelta(), optimizers.Adagrad(), optimizers.Adam(), optimizers.Adamax(), optimizers.SGD(), optimizers.RMSprop()]
        units1 = [16, 32, 64]  # units1 = [4, 8, 16, 32, 64, 128, 256]
        kernel_sizes1 = [(3, 3)]  # kernel_sizes = [(3, 3), (5, 5)]
        dropouts1 = [0.25]  # dropouts1 = [0.25, 0.5, 0.75]
        dropouts2 = [0.5]  # dropouts2 = [0.25, 0.5, 0.75]
        pool_sizes1 = [(2, 2)]  # pool_sizes1 = [(2, 2)]

        # create standard experiments structure
        self.experiments = {"filters1": filters1,
                            "filters2": filters2,
                            "losses1": losses1,
                            "units1": units1,
                            "optimizers1": optimizers1,
                            "kernel_sizes1": kernel_sizes1,
                            "dropouts1": dropouts1,
                            "dropouts2": dropouts2,
                            "pool_sizes1": pool_sizes1}
Project: deep-learning-with-Keras    Author: decordoba
def run_experiment(self, input_shape, labels, comb):
        # comb holds values like (32, (2,2), optimizers.Adam()). We need to use self.keys_mapper
        # which maps a name ("units", "kernel_sizes", "optimizers") to the position where it is
        # in comb. I wonder if it would be more comprehensible with a function like
        # get_element_from_comb(self, comb, key) { return comb[self.keys_mapper[key]] }
        opt = comb[self.keys_mapper["optimizers1"]]
        loss = comb[self.keys_mapper["losses1"]]
        f1 = comb[self.keys_mapper["filters1"]]
        f2 = comb[self.keys_mapper["filters2"]]
        u1 = comb[self.keys_mapper["units1"]]
        ks = comb[self.keys_mapper["kernel_sizes1"]]
        ps = comb[self.keys_mapper["pool_sizes1"]]
        d1 = comb[self.keys_mapper["dropouts1"]]
        d2 = comb[self.keys_mapper["dropouts2"]]
        return (opt, loss,
                Conv2D(f1, kernel_size=ks, activation='relu', input_shape=input_shape),
                Conv2D(f2, kernel_size=ks, activation='relu'),
                MaxPooling2D(pool_size=ps),
                Dropout(d1),
                Flatten(),
                Dense(u1, activation='relu'),
                Dropout(d2),
                Dense(len(labels), activation='softmax'))
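The accessor the comment above muses about would be a three-line addition (hypothetical, not in the original source):

def get_element_from_comb(self, comb, key):
    # Look a hyperparameter up by name rather than by position in comb.
    return comb[self.keys_mapper[key]]

after which each lookup reads e.g. opt = self.get_element_from_comb(comb, "optimizers1").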
Project: keras-rl    Author: matthiasplappert
def test_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=False)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Project: keras-rl    Author: matthiasplappert
def test_double_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=True)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Project: keras-rl    Author: matthiasplappert
def test_duel_dqn():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions, activation='linear'))

    memory = SequentialMemory(limit=1000, window_length=1)
    policy = EpsGreedyQPolicy(eps=.1)
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=50,
                   target_model_update=1e-1, policy=policy, enable_double_dqn=False, enable_dueling_network=True)
    dqn.compile(Adam(lr=1e-3))

    dqn.fit(env, nb_steps=2000, visualize=False, verbose=0)
    policy.eps = 0.
    h = dqn.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Project: keras-rl    Author: matthiasplappert
def test_sarsa():
    env = TwoRoundDeterministicRewardEnv()
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.n

    # Next, we build a very simple model.
    model = Sequential()
    model.add(Dense(16, input_shape=(1,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions, activation='linear'))

    policy = EpsGreedyQPolicy(eps=.1)
    sarsa = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=50, policy=policy)
    sarsa.compile(Adam(lr=1e-3))

    sarsa.fit(env, nb_steps=20000, visualize=False, verbose=0)
    policy.eps = 0.
    h = sarsa.test(env, nb_episodes=20, visualize=False)
    assert_allclose(np.mean(h.history['episode_reward']), 3.)
Project: auto_ml    Author: ClimbsRocks
def get_optimizer(name='Adadelta'):
    if name == 'SGD':
        return optimizers.SGD(clipnorm=1.)
    if name == 'RMSprop':
        return optimizers.RMSprop(clipnorm=1.)
    if name == 'Adagrad':
        return optimizers.Adagrad(clipnorm=1.)
    if name == 'Adadelta':
        return optimizers.Adadelta(clipnorm=1.)
    if name == 'Adam':
        return optimizers.Adam(clipnorm=1.)
    if name == 'Adamax':
        return optimizers.Adamax(clipnorm=1.)
    if name == 'Nadam':
        return optimizers.Nadam(clipnorm=1.)

    return optimizers.Adam(clipnorm=1.)
Project: rldurak    Author: janEbert
def create_model(self, epsilon):
        """Return a compiled model and the state and action input
        layers with the given epsilon for numerical stability.
        """
        inputs = Input(shape=(self.state_shape,))
        action_input = Input(shape=(self.action_shape,))
        x1 = Dense(self.neurons_per_layer[0], activation='relu')(inputs)
        x1 = Dense(self.neurons_per_layer[1], activation='relu')(x1)
        x2 = Dense(self.neurons_per_layer[1], activation='relu')(action_input)
        x = add([x1, x2])
        for n in self.neurons_per_layer[2:]:
            x = Dense(n, activation='relu')(x)
        outputs = Dense(self.action_shape)(x)

        model = Model(inputs=[inputs, action_input], outputs=outputs)

        assert self.optimizer_choice in ['adam', 'rmsprop']
        if self.optimizer_choice == 'adam':
            opti = Adam(lr=self.alpha, epsilon=epsilon)
        else:
            opti = RMSprop(lr=self.alpha, epsilon=epsilon)
        model.compile(optimizer=opti, loss='mse')
        return model, inputs, action_input
Project: tartarus    Author: sergiooramas
def build_model(config):
    """Builds the cnn."""
    params = config.model_arch
    get_model = getattr(models, 'get_model_'+str(params['architecture']))
    model = get_model(params)
    #model = model_kenun.build_convnet_model(params)
    # Learning setup
    t_params = config.training_params
    sgd = SGD(lr=t_params["learning_rate"], decay=t_params["decay"],
              momentum=t_params["momentum"], nesterov=t_params["nesterov"])
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    optimizer = eval(t_params['optimizer'])
    metrics = ['mean_squared_error']
    if config.model_arch["final_activation"] == 'softmax':
        metrics.append('categorical_accuracy')
    if t_params['loss_func'] == 'cosine':
        loss_func = eval(t_params['loss_func'])
    else:
        loss_func = t_params['loss_func']
    model.compile(loss=loss_func, optimizer=optimizer,metrics=metrics)

    return model
Project: kaggle_dsb2017    Author: astoc
def unet_model_xd3_2_6l_grid(nb_filter=48, dim=5, clen=3, img_rows=224, img_cols=224):   # NOTE that this procedure is/should be used with img_rows & img_cols as None

    # aiming for architecture similar to the http://cs231n.stanford.edu/reports2016/317_Report.pdf
    # Our model is six layers deep, consisting of a series of three CONV-RELU-POOL layers (with 32, 32, and 64 3x3 filters), a CONV-RELU layer (with 128 3x3 filters), three UPSCALE-CONV-RELU layers (with 64, 32, and 32 3x3 filters), and a final 1x1 CONV-SIGMOID layer to output pixel-level predictions. Its structure resembles Figure 2, though with the number of pixels, filters, and levels as described here.

    ## 3D CNN version of a previously developed unet_model_xd_6j 
    zconv = clen

    inputs = Input((1, dim, img_rows, img_cols))
    conv1 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)

    conv2 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)


    conv4 = Convolution3D(4*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(pool2)
    conv4 = Convolution3D(4*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv4)


    up6 = merge([UpSampling3D(size=(2, 2, 2))(conv4), conv2], mode='concat', concat_axis=1)
    conv6 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(up6)
    conv6 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv6)


    up7 = merge([UpSampling3D(size=(2, 2, 2))(conv6), conv1], mode='concat', concat_axis=1)  # original - only works for even dim 
    conv7 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(up7)
    conv7 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv7)


    pool11 = MaxPooling3D(pool_size=(2, 1, 1))(conv7)

    conv12 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool11)
    conv12 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv12)
    pool12 = MaxPooling3D(pool_size=(2, 1, 1))(conv12)

    conv13 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool12)
    conv13 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv13)
    pool13 = MaxPooling3D(pool_size=(2, 1, 1))(conv13)

    if (dim < 16):
        conv8 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(pool13)
    else:   # need one extra layer to get to 1D x 2D mask ...
        conv14 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool13)
        conv14 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv14)
        pool14 = MaxPooling3D(pool_size=(2, 1, 1))(conv14)
        conv8 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(pool14)

    model = Model(input=inputs, output=conv8)


    model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss, metrics=[dice_coef])
    #model.compile(optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),  loss=dice_coef_loss, metrics=[dice_coef])

    return model
Project: detection-2016-nipsws    Author: imatge-upc
def get_q_network(weights_path):
    model = Sequential()
    model.add(Dense(1024, init=lambda shape, name: normal(shape, scale=0.01, name=name), input_shape=(25112,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1024, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(6, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('linear'))
    adam = Adam(lr=1e-6)
    model.compile(loss='mse', optimizer=adam)
    if weights_path != "0":
        model.load_weights(weights_path)
    return model
Project: Deep-Learning-with-Keras    Author: PacktPublishing
def main():
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    # generator (z -> x)
    generator = model_generator()
    # discriminator (x -> y)
    discriminator = model_discriminator()
    example_gan(AdversarialOptimizerSimultaneous(), "output/gan-cifar10",
                opt_g=Adam(1e-4, decay=1e-5),
                opt_d=Adam(1e-3, decay=1e-5),
                nb_epoch=100, generator=generator, discriminator=discriminator,
                latent_dim=latent_dim)
Project: segmentation_DLMI    Author: imatge-upc
def compile_masked(model, lr=0.0005, num_classes=2):
        beta_1 = 0.9
        beta_2 = 0.999
        epsilon = 10 ** (-8)
        optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, clipnorm=1.)

        loss = [lambda y_true, y_pred: y_pred]

        model.compile(
            optimizer=optimizer,
            loss=loss,
        )
        return model
Project: AutoSleepScorerDev    Author: skjerns
def cnn3adam_slim(input_shape, n_classes):
    """
    Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3)
    """
    model = Sequential(name='cnn3adam')
    model.add(Conv1D (kernel_size = (50), filters = 32, strides=5, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (5), filters = 64, strides=1, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D (kernel_size = (5), filters = 64, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense (250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense (250, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam())
    return model
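Following the docstring, a hypothetical call for 3000-sample epochs with 3 channels and 5 classes:

# Hypothetical usage of cnn3adam_slim; shapes follow its docstring.
model = cnn3adam_slim(input_shape=(3000, 3), n_classes=5)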
Project: AutoSleepScorerDev    Author: skjerns
def cnn3adam_filter(input_shape, n_classes):
    """
    Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3)
    """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter')
    model.add(Conv1D (kernel_size = (50), filters = 128, strides=5, input_shape=input_shape, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (5), filters = 256, strides=1, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())

    model.add(Conv1D (kernel_size = (5), filters = 300, strides=2, kernel_initializer='he_normal', activation='elu')) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense (1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc1'))
    model.add(Dropout(0.5))
    model.add(Dense (1500, activation='elu', kernel_initializer='he_normal'))
    model.add(BatchNormalization(name='fc2'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation = 'softmax',name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
    return model
Project: AutoSleepScorerDev    Author: skjerns
def cnn3adam_filter_l2(input_shape, n_classes):
    """
    Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3)
    """
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    print('use more L2 model instead!')
    print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
    model = Sequential(name='cnn3adam_filter_l2')
    model.add(Conv1D (kernel_size = (50), filters = 128, strides=5, input_shape=input_shape, 
                      kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.005))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (5), filters = 256, strides=1, kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.005))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())

    model.add(Conv1D (kernel_size = (5), filters = 300, strides=2, kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.005))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense (1500, activation='relu', kernel_initializer='he_normal',name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense (1500, activation='relu', kernel_initializer='he_normal',name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation = 'softmax',name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
#    print('reset learning rate')
    return model
Project: AutoSleepScorerDev    Author: skjerns
def cnn3adam_filter_morel2_slim(input_shape, n_classes):
    """
    Input size should be [batch, 1d, 2d, ch] = (None, 3000, 3)
    """
    model = Sequential(name='cnn3adam_filter_morel2_slim')
    model.add(Conv1D (kernel_size = (50), filters = 128, strides=5, input_shape=input_shape, 
                      kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.05))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))

    model.add(Conv1D (kernel_size = (5), filters = 128, strides=1, kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.01))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Conv1D (kernel_size = (5), filters = 256, strides=2, kernel_initializer='he_normal', activation='relu',kernel_regularizer=keras.regularizers.l2(0.01))) 
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(MaxPooling1D())
    model.add(Flatten(name='conv3'))
    model.add(Dense (512, activation='relu', kernel_initializer='he_normal',name='fc1'))
    model.add(BatchNormalization(name='bn1'))
    model.add(Dropout(0.5, name='do1'))
    model.add(Dense (512, activation='relu', kernel_initializer='he_normal',name='fc2'))
    model.add(BatchNormalization(name='bn2'))
    model.add(Dropout(0.5, name='do2'))
    model.add(Dense(n_classes, activation = 'softmax',name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))
#    print('reset learning rate')
    return model
Project: AutoSleepScorerDev    Author: skjerns
def ann(input_shape, n_classes, layers=2, neurons=80, dropout=0.35):
    """
    for working with extracted features
    """
    model = Sequential(name='ann')
    for l in range(layers):
        model.add(Dense (neurons, input_shape=input_shape, activation='elu', kernel_initializer='he_normal'))
        model.add(BatchNormalization())
        model.add(Dropout(dropout))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[keras.metrics.categorical_accuracy])
    return model
Project: AutoSleepScorerDev    Author: skjerns
def ann_rnn(input_shape, n_classes):
    """
    for working with extracted features
    """
    model = Sequential(name='ann_rnn')
    model.add(TimeDistributed(Dense (80, activation='elu', kernel_initializer='he_normal'), input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Dropout(0.35))
    model.add(TimeDistributed(Dense (80, activation='elu', kernel_initializer='he_normal')))
    model.add(BatchNormalization())
    model.add(Dropout(0.35))
    model.add(LSTM(50))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[keras.metrics.categorical_accuracy])
    return model
Project: AutoSleepScorerDev    Author: skjerns
def pure_rnn_do(input_shape, n_classes,layers=2, neurons=80, dropout=0.3):
    """
    just replace ANN by RNNs
    """
    model = Sequential(name='pure_rnn')
    model.add(LSTM(neurons, return_sequences=False if layers==1 else True, input_shape=input_shape,dropout=dropout, recurrent_dropout=dropout))
    for i in range(layers-1):
        model.add(LSTM(neurons, return_sequences=False if i==layers-2 else True,dropout=dropout, recurrent_dropout=dropout))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001), metrics=[keras.metrics.categorical_accuracy])
    return model
项目:AutoSleepScorerDev    作者:skjerns    | 项目源码 | 文件源码
def pure_rnn_3(input_shape, n_classes):
    """
    just replace ANN by 3xRNNs
    """
    model = Sequential(name='pure_rnn')
    model.add(LSTM(80, return_sequences=True, input_shape=input_shape))
    model.add(LSTM(80, return_sequences=True))
    model.add(LSTM(50, return_sequences=False))
    model.add(Dense(n_classes, activation = 'softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[keras.metrics.categorical_accuracy])
    return model



#%%
Project: deeppavlov    Author: deepmipt
def _init_from_scratch(self):
        if self.model_name == 'log_reg':
            self.model = self.log_reg_model()
        if self.model_name == 'svc':
            self.model = self.svc_model()
        if self.model_name == 'cnn_word':
            self.model = self.cnn_word_model()
        if self.model_name == 'lstm_word':
            self.model = self.lstm_word_model()

        if self.model_type == 'nn':
            optimizer = Adam(lr=self.opt['learning_rate'], decay=self.opt['learning_decay'])
            self.model.compile(loss='binary_crossentropy',
                               optimizer=optimizer,
                               metrics=['binary_accuracy'])
Project: deepcut    Author: rkcosmos
def get_convo_nn2(no_word=200, n_gram=21, no_char=178):
    input1 = Input(shape=(n_gram,))
    input2 = Input(shape=(n_gram,))

    a = Embedding(no_char, 32, input_length=n_gram)(input1)
    a = SpatialDropout1D(0.15)(a)
    a = BatchNormalization()(a)

    a_concat = []
    for i in range(1,9):
        a_concat.append(conv_unit(a, n_gram, no_word, window = i))
    for i in range(9,12):
        a_concat.append(conv_unit(a, n_gram, no_word-50, window = i))
    a_concat.append(conv_unit(a, n_gram, no_word-100, window = 12))
    a_sum = Maximum()(a_concat)

    b = Embedding(12, 12, input_length=n_gram)(input2)
    b = SpatialDropout1D(0.15)(b)

    x = Concatenate(axis=-1)([a, a_sum, b])
    #x = Concatenate(axis=-1)([a_sum, b])
    x = BatchNormalization()(x)

    x = Flatten()(x)
    x = Dense(100, activation='relu')(x)
    out = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=[input1, input2], outputs=out)
    model.compile(optimizer=Adam(),
                  loss='binary_crossentropy', metrics=['acc'])
    return model