Python keras module, optimizers example source code

We extracted the following 5 code examples from open-source Python projects to illustrate how to use the keras.optimizers module.
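For orientation, here is a minimal sketch of the pattern all five examples share: instantiate an optimizer from keras.optimizers and hand it to model.compile(). The model and hyperparameter values below are illustrative assumptions, not taken from any of the projects.

import keras
from keras.models import Sequential
from keras.layers import Dense

# illustrative two-class model; the optimizer is the part that matters here
model = Sequential([Dense(2, activation='softmax', input_shape=(10,))])
opt = keras.optimizers.SGD(lr=0.01, momentum=0.9)  # values are assumptions
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])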

Project: AutoSleepScorerDev    Author: skjerns    | project source | file source
import keras
from keras.models import Sequential
from keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Reshape

def tsinalis(input_shape, n_classes):
    """
    Input size should be [batch, 1d, 2d, ch] = (None, 1, 15000, 1)
    """
    model = Sequential(name='Tsinalis')
    model.add(Conv1D(kernel_size=200, filters=20, input_shape=input_shape, activation='relu'))
    print(model.input_shape)
    print(model.output_shape)
    model.add(MaxPooling1D(pool_size=20, strides=10))
    print(model.output_shape)
    model.add(Reshape([20, -1, 1]))  # stack the 20 feature maps for 2D convolution
    print(model.output_shape)
    model.add(Conv2D(kernel_size=(20, 30), filters=400, activation='relu'))
    print(model.output_shape)
    model.add(MaxPooling2D(pool_size=(1, 10), strides=(1, 2)))
    print(model.output_shape)
    model.add(Flatten())
    print(model.output_shape)
    model.add(Dense(500, activation='relu'))
    model.add(Dense(500, activation='relu'))
    model.add(Dense(n_classes, activation='softmax',
                    activity_regularizer=keras.regularizers.l2()))
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.SGD(),
                  metrics=[keras.metrics.categorical_accuracy])
    return model
Project: minos    Author: guybedo    | project source | file source
import keras

def _build_optimizer(training):
    # look up the optimizer class by name on keras.optimizers,
    # then instantiate it with the configured keyword arguments
    optimizer = getattr(keras.optimizers, training.optimizer.optimizer)
    return optimizer(**training.optimizer.parameters)
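A minimal sketch of a config object this helper could consume; the `training` structure and its values are hypothetical, inferred only from the attribute access above.

from types import SimpleNamespace
import keras

# hypothetical config mirroring training.optimizer.optimizer / .parameters
training = SimpleNamespace(optimizer=SimpleNamespace(
    optimizer='Adam',            # class name looked up on keras.optimizers
    parameters={'lr': 1e-3}))    # kwargs forwarded to the class constructor

optimizer_cls = getattr(keras.optimizers, training.optimizer.optimizer)
optimizer = optimizer_cls(**training.optimizer.parameters)  # Adam(lr=0.001)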
Project: deepanalytics_compe26_benchmark    Author: takagiwa-ss    | project source | file source
def train():
    from keras.optimizers import SGD
    from keras.preprocessing.image import ImageDataGenerator

    logging.info('... building model')

    # _sgd_lr, _sgd_decay (and the other underscore names below) are
    # module-level constants defined elsewhere in the project
    sgd = SGD(lr=_sgd_lr, decay=_sgd_decay, momentum=0.9, nesterov=True)

    model = resnet()
    model.compile(
        loss=_objective,
        optimizer=sgd,
        metrics=['mae'])

    logging.info('... loading data')

    X, Y = load_train_data()

    logging.info('... training')

    datagen = ImageDataGenerator(
        # data augmentation: small random shifts only; rotation/shear/zoom disabled
        width_shift_range=1. / 8.,
        height_shift_range=1. / 8.,
        rotation_range=0.,
        shear_range=0.,
        zoom_range=0.,
    )

    model.fit_generator(
        datagen.flow(X, Y, batch_size=_batch_size),
        samples_per_epoch=X.shape[0],
        nb_epoch=_nb_epoch,
        verbose=1)

    return model
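Note that `samples_per_epoch` and `nb_epoch` are the Keras 1 argument names. Under Keras 2 the same call would be spelled roughly as follows (a sketch assuming the same generator; steps are counted in batches rather than samples):

model.fit_generator(
    datagen.flow(X, Y, batch_size=_batch_size),
    steps_per_epoch=X.shape[0] // _batch_size,
    epochs=_nb_epoch,
    verbose=1)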
Project: DeepJet    Author: mstoye    | project source | file source
def compileModel(self,
                     learningrate,
                     **compileargs):
        if not self.keras_model:
            raise Exception('set model first')  # should not happen in practice
        from keras.optimizers import Adam
        self.startlearningrate = learningrate
        self.optimizer = Adam(lr=self.startlearningrate)
        # any remaining keyword arguments (loss, metrics, ...) go straight to compile()
        self.keras_model.compile(optimizer=self.optimizer, **compileargs)
        self.compiled = True
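Usage sketch; the `trainer` instance and the compile arguments are assumptions for illustration, showing that `**compileargs` is forwarded verbatim to `keras_model.compile()`:

# hypothetical call on an instance of the class this method belongs to
trainer.compileModel(learningrate=1e-4,
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])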
Project: kaggle_dsb    Author: syagev    | project source | file source
def train_ensemble(trainset, valset, path_data, path_session, hyper_param):
    """Train an ensemble of models per set of hyper param.

    Args:
        trainset, valset: training and validation sets from `split_train_val()`
        path_data: /path/to/train_detections.hdf5
        path_session: string specifying the session's output path
        hyper_param: dictionary with entries as follows -
                        * epochs: number of epochs
                        * batch_sz: batch sizes to try in training
                        * batch_norm: do batch normalization?
                        * optimizers: keras.optimizers instances to try
                        * lr_scheduler_param: argument tuples for `make_lr_scheduler()`,
                          which builds a keras.callbacks.LearningRateScheduler
                        * dropout_rate: dropout rates to try
                        * pool_type: pooling types to try

    """

    models = []
    for i, batch_sz in enumerate(hyper_param["batch_sz"]):
        for j, optimizer in enumerate(hyper_param["optimizers"]):
            for k, lr_param in enumerate(hyper_param["lr_scheduler_param"]):
                for l, dropout_rate in enumerate(hyper_param["dropout_rate"]):
                    for m, batch_norm in enumerate(hyper_param["batch_norm"]):
                        for n, pool_type in enumerate(hyper_param["pool_type"]):

                            # prepare the tasks' hyper param
                            hyper_param_ = {
                                "epochs": hyper_param["epochs"],
                                "batch_sz": batch_sz,
                                "optimizer": optimizer,
                                "lr_schedule": make_lr_scheduler(*lr_param),
                                "dropout_rate": dropout_rate,
                                "batch_norm": batch_norm,
                                "pool_type": pool_type
                                }

                            # task's path
                            session_id_ = "{}.{}_{}_{}_{}_{}_{}".format(
                                os.path.basename(path_session),
                                i, j, k, l, m, n)
                            path_session_ = os.path.join(path_session,
                                                         session_id_)
                            if not os.path.exists(path_session_):
                                os.mkdir(path_session_)

                            # train
                            models.append(train(
                                trainset,
                                valset,
                                path_data,
                                path_session_,
                                hyper_param_))

    # sort by validation loss: each entry is assumed to be a (model, val_loss)
    # tuple; list.sort() sorts in place and returns None, so use sorted()
    return sorted(models, key=lambda entry: entry[1])
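For illustration, a `hyper_param` dictionary consistent with the loops above might look like this; all concrete values are assumptions, not taken from the project:

import keras

hyper_param = {
    "epochs": 20,
    "batch_sz": [32, 64],
    "optimizers": [keras.optimizers.Adam(), keras.optimizers.SGD(momentum=0.9)],
    "lr_scheduler_param": [(0.01, 0.5)],   # unpacked into make_lr_scheduler(*lr_param)
    "dropout_rate": [0.25, 0.5],
    "batch_norm": [True, False],
    "pool_type": ["max"],
}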