Python keras.optimizers module: Adamax() example source code

The following 13 code examples, extracted from open-source Python projects, illustrate how to use keras.optimizers.Adamax().
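
Before the project samples, a minimal sketch of the basic pattern. It is illustrative rather than taken from any project below, and it assumes the Keras 1.x-era API used throughout this page, where the learning rate is passed as lr (Adamax defaults: lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08):

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adamax

# Tiny classifier compiled with Adamax; the layer sizes are arbitrary.
model = Sequential()
model.add(Dense(32, input_dim=20, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(optimizer=Adamax(lr=0.002), loss='categorical_crossentropy')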

Project: deeppavlov    Author: deepmipt
from keras.optimizers import Adam, Adamax, Adadelta


def getOptimizer(optim, exp_decay, grad_norm_clip, lr=0.001):
    """Set up an optimizer, combining several presets from
    published well-performing models on SQuAD."""

    # All three candidates are built eagerly; only the requested one is kept.
    optimizers = {
        'Adam': Adam(lr=lr, decay=exp_decay, clipnorm=grad_norm_clip),
        'Adamax': Adamax(lr=lr, decay=exp_decay, clipnorm=grad_norm_clip),
        'Adadelta': Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, decay=exp_decay, clipnorm=grad_norm_clip)
    }

    try:
        optimizer = optimizers[optim]
    except KeyError as e:
        raise ValueError('Unknown optimizer: {}'.format(e.args[0]))

    del optimizers
    return optimizer
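
A hypothetical call, with decay and clipping values chosen purely for illustration:

optimizer = getOptimizer('Adamax', exp_decay=1e-6, grad_norm_clip=5.0)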

Project: deep-learning-with-Keras    Author: decordoba
# Assumes module-level imports: from keras import losses, optimizers
def __init__(self):
        filters1 = [16, 32, 64]  # filters1 = [4, 8, 16, 32, 64, 128, 256]
        filters2 = [16, 32, 64]  # filters2 = [4, 8, 16, 32, 64, 128, 256]
        losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]  # all candidate losses enabled
        optimizers1 = [optimizers.Adam()]  # optimizers1 = [optimizers.Adadelta(), optimizers.Adagrad(), optimizers.Adam(), optimizers.Adamax(), optimizers.SGD(), optimizers.RMSprop()]
        units1 = [16, 32, 64]  # units1 = [4, 8, 16, 32, 64, 128, 256]
        kernel_sizes1 = [(3, 3)]  # kernel_sizes = [(3, 3), (5, 5)]
        dropouts1 = [0.25]  # dropouts1 = [0.25, 0.5, 0.75]
        dropouts2 = [0.5]  # dropouts2 = [0.25, 0.5, 0.75]
        pool_sizes1 = [(2, 2)]  # pool_sizes1 = [(2, 2)]

        # create standard experiments structure
        self.experiments = {"filters1": filters1,
                            "filters2": filters2,
                            "losses1": losses1,
                            "units1": units1,
                            "optimizers1": optimizers1,
                            "kernel_sizes1": kernel_sizes1,
                            "dropouts1": dropouts1,
                            "dropouts2": dropouts2,
                            "pool_sizes1": pool_sizes1}
Project: auto_ml    Author: ClimbsRocks
from keras import optimizers


def get_optimizer(name='Adadelta'):
    if name == 'SGD':
        return optimizers.SGD(clipnorm=1.)
    if name == 'RMSprop':
        return optimizers.RMSprop(clipnorm=1.)
    if name == 'Adagrad':
        return optimizers.Adagrad(clipnorm=1.)
    if name == 'Adadelta':
        return optimizers.Adadelta(clipnorm=1.)
    if name == 'Adam':
        return optimizers.Adam(clipnorm=1.)
    if name == 'Adamax':
        return optimizers.Adamax(clipnorm=1.)
    if name == 'Nadam':
        return optimizers.Nadam(clipnorm=1.)

    # Any unrecognized name falls back to Adam.
    return optimizers.Adam(clipnorm=1.)
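
Hypothetical calls (the clipnorm of 1.0 is baked in above):

opt = get_optimizer('Adamax')         # Adamax(clipnorm=1.)
fallback = get_optimizer('no-such')   # unrecognized name -> Adam(clipnorm=1.)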
Project: nea    Author: nusnlp
from keras import optimizers as opt


def get_optimizer(args):

    clipvalue = 0   # 0 disables value clipping in these Keras-1.x-era optimizers
    clipnorm = 10

    if args.algorithm == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'sgd':
        optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adam':
        optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adamax':
        optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    else:
        # Without this branch an unrecognized name raised UnboundLocalError.
        raise ValueError('Unknown algorithm: {}'.format(args.algorithm))

    return optimizer
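
A hypothetical driver for the function above; only the single attribute it reads is required:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--algorithm', default='adamax',
                    choices=['rmsprop', 'sgd', 'adagrad', 'adadelta', 'adam', 'adamax'])
args = parser.parse_args()
optimizer = get_optimizer(args)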
Project: Nerve-Segmentation    Author: matthewzhou
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Dropout, Flatten, Dense, Activation
from keras.optimizers import Adamax


def create_model(img_rows, img_cols):
    model = Sequential()  # initialize model
    # Channels-first input_shape=(1, img_rows, img_cols): Theano-style image ordering.
    model.add(Convolution2D(4, 3, 3, border_mode='same', activation='relu', init='he_normal',
                            input_shape=(1, img_rows, img_cols)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(8, 3, 3, border_mode='same', activation='relu', init='he_normal'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(2))
    model.add(Activation('softmax'))
    adm = Adamax()  # Adamax with library defaults (lr=0.002)
    # sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=adm, loss='categorical_crossentropy')
    return model
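
A hypothetical call (the 64x64 patch size is chosen for illustration):

model = create_model(64, 64)  # expects input batches shaped (n, 1, 64, 64)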
Project: keras    Author: GeekLiB
def test_adamax():
    # _test_optimizer is a shared helper defined in Keras' optimizer test suite.
    _test_optimizer(Adamax())
    _test_optimizer(Adamax(decay=1e-3))
Project: aetros-cli    Author: aetros
def get_learning_rate(self):

        if hasattr(self.model, 'optimizer'):
            config = self.model.optimizer.get_config()

            from keras import backend as K
            from keras.optimizers import Adadelta, Adam, Adamax, Adagrad, RMSprop, SGD

            if isinstance(self.model.optimizer, (Adadelta, Adam, Adamax, Adagrad, RMSprop, SGD)):
                # Effective rate under Keras' time-based decay: lr / (1 + decay * iterations)
                return config['lr'] * (1. / (1. + config['decay'] * float(K.get_value(self.model.optimizer.iterations))))

            elif 'lr' in config:
                return config['lr']
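
A quick sanity check of the decay formula above, with assumed values in plain Python:

lr, decay, iterations = 0.002, 1e-4, 1000.
effective_lr = lr * (1. / (1. + decay * iterations))  # 0.002 / 1.1 ~= 0.00182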
Project: keras-customized    Author: ambrite
def test_adamax():
    _test_optimizer(Adamax())
    _test_optimizer(Adamax(decay=1e-3))
Project: Sign-Language-Recognition    Author: achyudhk
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import Adamax


def train_mlp1(x_train, y_train, x_test, y_test, input_dim, num_classes=24):
    """Train a three-hidden-layer MLP on (x_train, y_train), validate on
    (x_test, y_test), and return the test accuracy. Labels are expected
    one-hot encoded with num_classes columns."""
    model = Sequential()
    model.add(Dense(512, input_dim=input_dim))
    model.add(Activation('relu'))   # An "activation" is just a non-linear function applied to the output
                                    # of the layer above; "relu" clamps all values below 0 to 0.
    model.add(Dropout(0.1))         # Dropout helps protect the model from memorizing or "overfitting" the training data.
    model.add(Dense(1024))
    model.add(Activation('relu'))

    model.add(Dropout(0.1))
    model.add(Dense(386))
    model.add(Activation('relu'))

    model.add(Dropout(0.1))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))  # "softmax" ensures the output is a valid probability distribution:
                                      # all values are non-negative and sum to 1.

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adamax(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-5),
                  metrics=["accuracy"])
    model.fit(x_train, y_train,
              batch_size=40, nb_epoch=16, verbose=1,
              validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=1)
    return score[1]
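
categorical_crossentropy with a softmax output expects one-hot labels; a hypothetical preprocessing step for integer class labels (Keras 1.x helper):

from keras.utils import np_utils

y_train = np_utils.to_categorical(train_labels, 24)  # train_labels: assumed integer label array
y_test = np_utils.to_categorical(test_labels, 24)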
Project: keras    Author: NVIDIA
def test_adamax():
    _test_optimizer(Adamax())
    _test_optimizer(Adamax(decay=1e-3))
Project: Sign-Language-Recognition    Author: achyudhk
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Dropout, Flatten, Dense, Activation
from keras.optimizers import Adamax


def train_cnn1(x_train, y_train, x_test, y_test, num_classes, input_shape):
    nb_filters = 32        # used only by an earlier variant, left commented out in the source
    pool_size = (2, 2)
    kernel_size = (3, 3)

    model = Sequential()
    # (An earlier, smaller single-stack architecture was left commented out here.)

    # Two blocks of stacked 3x3 convolutions, each followed by 2x2 max pooling.
    model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    model.add(Convolution2D(32, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    # model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adamax(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-5),
                  metrics=["accuracy"])
    model.fit(x_train, y_train,
              batch_size=24, nb_epoch=32, verbose=1,
              validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=1)
    return score[1]
Project: Sign-Language-Recognition    Author: achyudhk
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Dropout, Flatten, Dense, Activation
from keras.optimizers import Adamax


def train_cnn1(x_train, y_train, x_test, y_test, num_classes, input_shape):
    nb_filters = 32        # used only by an earlier variant, left commented out in the source
    pool_size = (2, 2)
    kernel_size = (3, 3)

    model = Sequential()
    # (The same commented-out single-stack variant as the previous file was left here.)

    model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    model.add(Convolution2D(32, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    # model.summary()
    # Identical to the previous variant except for a higher Adamax learning rate (0.01) and decay (1e-4).
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adamax(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-4),
                  metrics=["accuracy"])
    model.fit(x_train, y_train,
              batch_size=24, nb_epoch=32, verbose=1,
              validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=1)
    return score[1]
Project: StockRecommendSystem    Author: doncat99
# Assumes module-level imports: from keras.models import Sequential,
# from keras.layers import LSTM, Dense, Dropout, Activation, and from keras import optimizers.
def lstm_model(self):
        model = Sequential()
        first = True
        # Note: the branches below assume at least two hidden layers; with a single
        # entry the "last layer" branch fires first and no input_shape is declared.
        for idx in range(len(self.paras.model['hidden_layers'])):
            if idx == (len(self.paras.model['hidden_layers']) - 1):
                # last LSTM layer: collapse the sequence to a single vector
                model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=False))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))
            elif first:
                # first LSTM layer: declares the input shape
                model.add(LSTM(input_shape=(None, int(self.paras.n_features)),
                               units=int(self.paras.model['hidden_layers'][idx]),
                               return_sequences=True))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))
                first = False
            else:
                model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=True))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))

        if self.paras.model['optimizer'] == 'sgd':
            # optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
            optimizer = optimizers.SGD(lr=self.paras.model['learning_rate'], decay=1e-6, momentum=0.9, nesterov=True)
        elif self.paras.model['optimizer'] == 'rmsprop':
            # optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.RMSprop(lr=self.paras.model['learning_rate']/10, rho=0.9, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adagrad':
            # optimizer = optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.Adagrad(lr=self.paras.model['learning_rate'], epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adam':
            # optimizer = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.Adam(lr=self.paras.model['learning_rate']/10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adadelta':
            optimizer = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adamax':
            optimizer = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'nadam':
            optimizer = optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
        else:
            optimizer = optimizers.Adam(lr=self.paras.model['learning_rate']/10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        # output layer
        model.add(Dense(units=self.paras.model['out_layer']))
        model.add(Activation(self.paras.model['out_activation']))
        model.compile(loss=self.paras.model['loss'], optimizer=optimizer, metrics=['accuracy'])

        return model
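
A hypothetical paras configuration that would drive this builder down the Adamax branch (field names are taken from the code above; values are illustrative):

paras.n_features = 5
paras.model = {
    'hidden_layers': [128, 64],
    'activation': 'tanh',
    'dropout': 0.2,
    'optimizer': 'adamax',    # selects the hardcoded Adamax(lr=0.002, ...) branch above
    'learning_rate': 0.002,   # unused by the adamax branch
    'out_layer': 1,
    'out_activation': 'linear',
    'loss': 'mse',
}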