Python keras.optimizers module: SGD usage examples

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.optimizers.SGD.

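Every snippet below follows the same basic pattern: construct an SGD instance with a learning rate (and, optionally, momentum, decay, and a Nesterov flag), then pass it to model.compile(). Here is a minimal, self-contained sketch of that pattern, assuming the Keras 1.x/2.x lr-style API used throughout these examples (recent Keras releases renamed lr to learning_rate); the toy two-layer model is purely illustrative:

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

# A toy classifier, only to illustrate the recurring SGD pattern.
model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dense(10, activation='softmax'))

# SGD with momentum, learning-rate decay, and Nesterov updates.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd,
              metrics=['accuracy'])
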
Project: LSTM-GRU-CNN-MLP    Author: ansleliu    | Project source | File source
def build_model(layers):
    model = Sequential()

    model.add(GRU(input_dim=layers[0], output_dim=layers[1], activation='tanh', return_sequences=True))
    model.add(Dropout(0.15))  # Dropout to reduce overfitting

    # model.add(GRU(layers[2],activation='tanh', return_sequences=True))
    # model.add(Dropout(0.2))  # Dropout overfitting

    model.add(GRU(layers[2], activation='tanh', return_sequences=False))
    model.add(Dropout(0.15))  # Dropout to reduce overfitting

    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="mse", optimizer="rmsprop") # Nadam rmsprop
    print "Compilation Time : ", time.time() - start
    return model
Project: keras-fractalnet    Author: snf    | Project source | File source
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = RMSprop(lr=LEARN_START)
    #optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png')
    return model
Project: keras    Author: GeekLiB    | Project source | File source
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, 3, input_shape=input_shape,
                     border_mode='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
Project: keras-fractalnet    Author: snf    | Project source | File source
def build_network(deepest=False):
    dropout = [0., 0.1, 0.2, 0.3, 0.4]
    conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
    input = Input(shape=(3, 32, 32) if K._BACKEND == 'theano' else (32, 32, 3))
    output = fractal_net(
        c=3, b=5, conv=conv,
        drop_path=0.15, dropout=dropout,
        deepest=deepest)(input)
    output = Flatten()(output)
    output = Dense(NB_CLASSES, init='he_normal')(output)
    output = Activation('softmax')(output)
    model = Model(input=input, output=output)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
    #optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM, nesterov=True)
    optimizer = Adam()
    #optimizer = Nadam()
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    plot(model, to_file='model.png', show_shapes=True)
    return model
Project: five-video-classification-methods    Author: harvitronix    | Project source | File source
def freeze_all_but_mid_and_top(model):
    """After we fine-tune the dense layers, train deeper."""
    # we chose to train the top 2 inception blocks, i.e. we will freeze
    # the first 172 layers and unfreeze the rest:
    for layer in model.layers[:172]:
        layer.trainable = False
    for layer in model.layers[172:]:
        layer.trainable = True

    # we need to recompile the model for these modifications to take effect
    # we use SGD with a low learning rate
    model.compile(
        optimizer=SGD(lr=0.0001, momentum=0.9),
        loss='categorical_crossentropy',
        metrics=['accuracy', 'top_k_categorical_accuracy'])

    return model
Project: pCVR    Author: xjtushilei    | Project source | File source
def make_teacher_model(train_data, validation_data, epochs=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, input_shape=input_shape,
                     padding='same', name='conv1'))
    model.add(MaxPooling2D(2, name='pool1'))
    model.add(Conv2D(64, 3, padding='same', name='conv2'))
    model.add(MaxPooling2D(2, name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(num_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y,
                        epochs=epochs,
                        validation_data=validation_data)
    return model, history
Project: dogsVScats    Author: prajwalkr    | Project source | File source
def runner(model, epochs):
    initial_LR = 0.001
    if not use_multiscale and not use_multicrop: training_gen, val_gen = DataGen()
    else: training_gen, val_gen = ms_traingen(), ms_valgen()

    model.compile(optimizer=SGD(initial_LR, momentum=0.9, nesterov=True), loss='binary_crossentropy')

    val_checkpoint = ModelCheckpoint('bestval.h5', 'val_loss', 1, True)
    cur_checkpoint = ModelCheckpoint('current.h5')
    # def lrForEpoch(i): return initial_LR
    lrScheduler = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, cooldown=1, verbose=1)
    print('Model compiled.')

    try:
        model.fit_generator(training_gen, samples_per_epoch, epochs,
                            verbose=1, validation_data=val_gen, nb_val_samples=nb_val_samples,
                            callbacks=[val_checkpoint, cur_checkpoint, lrScheduler])
    except Exception as e:
        print(e)
    finally:
        fname = dumper(model,'cnn')
        print('Model saved to disk at {}'.format(fname))
        return model
Project: importance-sampling    Author: idiap    | Project source | File source
def build_model(model, wrapper, dataset, hyperparams, reweighting):
    def build_optimizer(opt, hyperparams):
        return {
            "sgd": SGD(
                lr=hyperparams.get("lr", 0.001),
                momentum=hyperparams.get("momentum", 0.0)
            ),
            "adam": Adam(lr=hyperparams.get("lr", 0.001))
        }[opt]

    model = models.get(model)(dataset.shape, dataset.output_size)
    model.compile(
        optimizer=build_optimizer(
            hyperparams.get("opt", "adam"),
            hyperparams
        ),
        loss=model.loss,
        metrics=model.metrics
    )

    return get_models_dictionary(hyperparams, reweighting)[wrapper](model)
Project: pydl    Author: rafaeltg    | Project source | File source
def get_optimizer(self):

        if self.opt == 'sgd':
            return k_opt.SGD(lr=self.learning_rate, momentum=self.momentum)

        if self.opt == 'rmsprop':
            return k_opt.RMSprop(lr=self.learning_rate)

        if self.opt == 'adagrad':
            return k_opt.Adagrad(lr=self.learning_rate)

        if self.opt == 'adadelta':
            return k_opt.Adadelta(lr=self.learning_rate)

        if self.opt == 'adam':
            return k_opt.Adam(lr=self.learning_rate)

        raise Exception('Invalid optimization function - %s' % self.opt)
Project: NetworkCompress    Author: luzai    | Project source | File source
def make_teacher_model(train_data, validation_data, epochs=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, input_shape=input_shape,
                     padding='same', name='conv1'))
    model.add(MaxPooling2D(2, name='pool1'))
    model.add(Conv2D(64, 3, padding='same', name='conv2'))
    model.add(MaxPooling2D(2, name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(num_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y,
                        epochs=epochs,
                        validation_data=validation_data)
    return model, history
Project: Youtube8mdataset_kagglechallenge    Author: jasonlee27    | Project source | File source
def train(self, model, saveto_path=''):
        x_train, y_train = get_data(self.train_data_path, "train", "frame", self.feature_type)
        print('%d training frame level samples.' % len(x_train))
        x_valid, y_valid = get_data(self.valid_data_path, "valid", "frame", self.feature_type)
        print('%d validation frame level samples.' % len(x_valid))

        sgd = SGD(lr=0.01,
                  decay=1e-6,
                  momentum=0.9,
                  nesterov=True)
        model.compile(loss='categorical_crossentropy',
                      optimizer=sgd,
                      metrics=['accuracy'])

        callbacks = list()
        callbacks.append(CSVLogger(LOG_FILE))
        callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=0.0001))

        if saveto_path:
            callbacks.append(ModelCheckpoint(filepath=MODEL_WEIGHTS, verbose=1))

        model.fit(x_train,
                  y_train,
                  epochs=5,
                  callbacks=callbacks,
                  validation_data=(x_valid, y_valid))

        # Save the weights on completion.
        if saveto_path:
            model.save_weights(saveto_path)
Project: keras-surgeon    Author: BenWhetton    | Project source | File source
def train_top_model():
    # Load the bottleneck features and labels
    train_features = np.load(open(output_dir+'bottleneck_features_train.npy', 'rb'))
    train_labels = np.load(open(output_dir+'bottleneck_labels_train.npy', 'rb'))
    validation_features = np.load(open(output_dir+'bottleneck_features_validation.npy', 'rb'))
    validation_labels = np.load(open(output_dir+'bottleneck_labels_validation.npy', 'rb'))

    # Create the top model for the inception V3 network, a single Dense layer
    # with softmax activation.
    top_input = Input(shape=train_features.shape[1:])
    top_output = Dense(5, activation='softmax')(top_input)
    model = Model(top_input, top_output)

    # Train the model using the bottleneck features and save the weights.
    model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    csv_logger = CSVLogger(output_dir + 'top_model_training.csv')
    model.fit(train_features, train_labels,
              epochs=top_epochs,
              batch_size=batch_size,
              validation_data=(validation_features, validation_labels),
              callbacks=[csv_logger])
    model.save_weights(top_model_weights_path)
Project: cnn-lstm-gan-music-generation    Author: MarkSeygan    | Project source | File source
def generate(SONG_LENGTH, nb):

    generator = generator_model()
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    generator.load_weights('generator')

    print "loading_latent_music"
    latent_music = trainLoadMusic.loadMusic("lstm_outputs", SONG_LENGTH)

    for i in range(nb):

        latent = random.choice(latent_music)

        song = generator.predict(latent, verbose=1)

        song = song.reshape((SONG_LENGTH, note_span_with_ligatures // 2, 2))
        song_0 = generate_from_probabilities(song)
        matrixToMidi(song_0, 'outputs/example {}'.format(i))
Project: deep-learning-with-Keras    Author: decordoba    | Project source | File source
def __init__(self):
        filters1 = [16, 32, 64]  # filters1 = [4, 8, 16, 32, 64, 128, 256]
        filters2 = [16, 32, 64]  # filters2 = [4, 8, 16, 32, 64, 128, 256]
        losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]  # losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]
        optimizers1 = [optimizers.Adam()]  # optimizers1 = [optimizers.Adadelta(), optimizers.Adagrad(), optimizers.Adam(), optimizers.Adamax(), optimizers.SGD(), optimizers.RMSprop()]
        units1 = [16, 32, 64]  # units1 = [4, 8, 16, 32, 64, 128, 256]
        kernel_sizes1 = [(3, 3)]  # kernel_sizes = [(3, 3), (5, 5)]
        dropouts1 = [0.25]  # dropouts1 = [0.25, 0.5, 0.75]
        dropouts2 = [0.5]  # dropouts2 = [0.25, 0.5, 0.75]
        pool_sizes1 = [(2, 2)]  # pool_sizes1 = [(2, 2)]

        # create standard experiments structure
        self.experiments = {"filters1": filters1,
                            "filters2": filters2,
                            "losses1": losses1,
                            "units1": units1,
                            "optimizers1": optimizers1,
                            "kernel_sizes1": kernel_sizes1,
                            "dropouts1": dropouts1,
                            "dropouts2": dropouts2,
                            "pool_sizes1": pool_sizes1}
Project: auto_ml    Author: ClimbsRocks    | Project source | File source
def get_optimizer(name='Adadelta'):
    if name == 'SGD':
        return optimizers.SGD(clipnorm=1.)
    if name == 'RMSprop':
        return optimizers.RMSprop(clipnorm=1.)
    if name == 'Adagrad':
        return optimizers.Adagrad(clipnorm=1.)
    if name == 'Adadelta':
        return optimizers.Adadelta(clipnorm=1.)
    if name == 'Adam':
        return optimizers.Adam(clipnorm=1.)
    if name == 'Adamax':
        return optimizers.Adamax(clipnorm=1.)
    if name == 'Nadam':
        return optimizers.Nadam(clipnorm=1.)

    return optimizers.Adam(clipnorm=1.)
Project: keras-customized    Author: ambrite    | Project source | File source
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, 3, input_shape=input_shape,
                     border_mode='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
Project: huaat_ml_dl    Author: ieee820    | Project source | File source
def predict_by_one(cube):
    # load json and create model
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model.hdf5")
    print("Loaded model from disk")
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    loaded_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    x = cube.reshape(-1, 1, 6, 20, 20)
    print(x.shape)
    result = loaded_model.predict(x, batch_size=10, verbose=0)
    # print(result.shape)
    # show result
    for i in result:
        print(i[0], i[1])
    return result
Project: HSICNN    Author: jamesbing    | Project source | File source
def Net_model(lr=0.005,decay=1e-6,momentum=0.9):
    model = Sequential()
    model.add(Convolution2D(nb_filters1, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))

    model.add(Convolution2D(nb_filters2, nb_conv, nb_conv))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    #model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(1000))  # fully-connected layer
    model.add(Activation('tanh'))
    #model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    return model
Project: LSTM-GRU-CNN-MLP    Author: ansleliu    | Project source | File source
def build_model(layers):
    model = Sequential()

    model.add(Dense(layers[1], input_shape=(20,), activation='relu'))
    model.add(Dropout(0.2))  # Dropout to reduce overfitting

    # model.add(Dense(layers[2],activation='tanh'))
    # model.add(Dropout(0.2))  # Dropout overfitting

    model.add(Dense(layers[2], activation='relu'))
    model.add(Dropout(0.2))  # Dropout to reduce overfitting

    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("softmax"))

    model.summary()

    start = time.time()
    # sgd = SGD(lr=0.5, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy']) # Nadam RMSprop()
    print "Compilation Time : ", time.time() - start
    return model
Project: LSTM-GRU-CNN-MLP    Author: ansleliu    | Project source | File source
def build_model(layers):
    model = Sequential()

    model.add(Dense(layers[1], input_shape=(20,), activation='tanh'))
    model.add(Dropout(0.2))  # Dropout to reduce overfitting

    # model.add(Dense(layers[2],activation='tanh'))
    # model.add(Dropout(0.2))  # Dropout overfitting

    model.add(Dense(layers[2], activation='tanh'))
    model.add(Dropout(0.2))  # Dropout to reduce overfitting

    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="mse", optimizer="adam") # Nadam
    print "Compilation Time : ", time.time() - start
    return model
Project: motion-classification    Author: matthiasplappert    | Project source | File source
def fit(self, X, y):
        assert isinstance(X, list)  #TODO: this should not be an assert
        assert len(y) > 0
        assert len(X) == len(y)

        X = pad_sequences(X)
        print(X.shape, y.shape)

        n_features = X.shape[2]
        self.n_labels_ = y.shape[1]
        print(n_features, self.n_labels_)

        model = Sequential()
        model.add(GRU(n_features, 128))
        model.add(Dropout(0.1))
        model.add(BatchNormalization(128))
        model.add(Dense(128, self.n_labels_))
        model.add(Activation('sigmoid'))

        sgd = opt.SGD(lr=0.005, decay=1e-6, momentum=0., nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd, class_mode='categorical')

        model.fit(X, y, batch_size=self.n_batch_size, nb_epoch=self.n_epochs, show_accuracy=True)
        self.model_ = model
Project: tartarus    Author: sergiooramas    | Project source | File source
def build_model(config):
    """Builds the cnn."""
    params = config.model_arch
    get_model = getattr(models, 'get_model_'+str(params['architecture']))
    model = get_model(params)
    #model = model_kenun.build_convnet_model(params)
    # Learning setup
    t_params = config.training_params
    sgd = SGD(lr=t_params["learning_rate"], decay=t_params["decay"],
              momentum=t_params["momentum"], nesterov=t_params["nesterov"])
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    optimizer = eval(t_params['optimizer'])
    metrics = ['mean_squared_error']
    if config.model_arch["final_activation"] == 'softmax':
        metrics.append('categorical_accuracy')
    if t_params['loss_func'] == 'cosine':
        loss_func = eval(t_params['loss_func'])
    else:
        loss_func = t_params['loss_func']
    model.compile(loss=loss_func, optimizer=optimizer, metrics=metrics)

    return model
Project: dsr16_nlp    Author: honnibal    | Project source | File source
def __init__(self, widths, vocab_size=5000):
        from keras.models import Sequential
        from keras.layers import Embedding, Dense, TimeDistributedMerge
        from keras.layers.advanced_activations import ELU
        from keras.preprocessing.sequence import pad_sequences
        from keras.optimizers import SGD
        self.n_classes = widths[-1]
        self.vocab_size = vocab_size
        self.word_to_int = {}
        self.int_to_word = np.ndarray(shape=(vocab_size+1,), dtype='int64')
        self.model = Sequential()
        self.model.add(Embedding(vocab_size, widths[0]))
        self.model.add(TimeDistributedMerge(mode='ave'))
        for width in widths[1:-1]:
            layer = Dense(output_dim=width, init='he_normal', activation=ELU(1.0))
            self.model.add(layer)
        self.model.add(
            Dense(
                self.n_classes,
                init='zero',
                activation='softmax'))
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy', optimizer=sgd)
Project: reading-text-in-the-wild    Author: mathDR    | Project source | File source
def __init__(self, architecture_file=None, weight_file=None, optimizer=None):
        # Generate mapping for softmax layer to characters
        output_str = '0123456789abcdefghijklmnopqrstuvwxyz '
        self.output = [x for x in output_str]
        self.L = len(self.output)

        # Load model and saved weights
        from keras.models import model_from_json
        if architecture_file is None:
            self.model = model_from_json(open('char2_architecture.json').read())
        else:
            self.model = model_from_json(open(architecture_file).read())

        if weight_file is None:
            self.model.load_weights('char2_weights.h5')
        else:
            self.model.load_weights(weight_file)

        if optimizer is None:
            from keras.optimizers import SGD
            optimizer = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
Project: keras-mxnet-benchmarks    Author: sandeep-krishnamurthy    | Project source | File source
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, 3, input_shape=input_shape,
                     border_mode='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model = make_model(model, loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
Project: Exoplanet-Artificial-Intelligence    Author: pearsonkyle    | Project source | File source
def make_wave(maxlen):
    model = Sequential()
    # dense1
    model.add(Dense(64, input_dim=maxlen, kernel_initializer='he_normal', bias_initializer='zeros'))
    model.add(PRELU())
    model.add(Dropout(0.25))

    model.add(Dense(32))
    model.add(PRELU())

    model.add(Dense(8))
    model.add(PRELU())

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    SGDsolver = SGD(lr=0.1, momentum=0.25, decay=0.0001, nesterov=True)
    model.compile(loss='binary_crossentropy',
                optimizer=SGDsolver,
                metrics=['accuracy'])
    return model
Project: Reinforcement_Learning_Project    Author: AaronYALai    | Project source | File source
def initAgent(neurons=512, layers=1, lr=1e-3,
              moment=0.9, width=19, alpha=0.1):
    """Initialize agent: specify num of neurons and hidden layers"""
    model = Sequential()
    model.add(Dense(2 * width**2, init='lecun_uniform',
              input_shape=(2 * width**2,)))
    model.add(LeakyReLU(alpha=alpha))

    for i in range(layers):
        model.add(Dense(neurons, init='lecun_uniform'))
        model.add(LeakyReLU(alpha=alpha))
        model.add(Dropout(0.2))

    model.add(Dense(width**2, init='lecun_uniform'))
    # use linear output layer to generate real-valued outputs
    model.add(Activation('linear'))

    # opt = RMSprop(lr=lr)
    opt = SGD(lr=lr, momentum=moment, decay=1e-18, nesterov=False)
    model.compile(loss='mse', optimizer=opt)

    return model
Project: cancer_nn    Author: tanmoyopenroot    | Project source | File source
def topModel(optimizer='adam', init='glorot_uniform'):
    # Create Model
    model = Sequential()
    model.add(Flatten(input_shape = (7, 7, 512) ))
    model.add(Dense(1024, kernel_initializer=init, activation="relu"))
    model.add(Dropout(0.7))    
    # model.add(Dense(4096, activation="relu"))
    model.add(Dense(1, kernel_initializer=init, activation="sigmoid"))

    # Compile model
    model.compile(loss='binary_crossentropy', optimizer=optimizer,  metrics=['accuracy'])

    # model.compile(
    #     loss='binary_crossentropy',
    #     optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
    #     metrics=['accuracy']
    # )

    return model
Project: cancer_nn    Author: tanmoyopenroot    | Project source | File source
def topModel(optimizer='adam', init='glorot_uniform'):
    # Create Model
    model = Sequential()
    model.add(Flatten(input_shape = (7, 7, 512) ))
    model.add(Dense(256, kernel_initializer=init, activation="relu"))
    model.add(Dropout(0.7))    
    # model.add(Dense(4096, activation="relu"))
    model.add(Dense(1, kernel_initializer=init, activation="sigmoid"))

    # Compile model
    model.compile(loss='binary_crossentropy', optimizer=optimizer,  metrics=['accuracy'])

    # model.compile(
    #     loss='binary_crossentropy',
    #     optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
    #     metrics=['accuracy']
    # )

    return model
Project: cv_ml    Author: techfort    | Project source | File source
def create_model(learning_rate=0.1, momentum=0.9):
    model = Sequential()
    model.add(Convolution2D(20, 9, 9, border_mode='same', input_shape=(3, SIZE, SIZE)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
    model.add(Convolution2D(50, 5, 5, activation = "relu"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
    model.add(Flatten())
    model.add(Dense(768, input_dim=3072, init='uniform', activation = 'relu'))
    model.add(Dropout(0.1))
    model.add(Dense(384, init = 'uniform',  activation = 'relu', W_constraint=maxnorm(3)))
    model.add(Dense(4))
    model.add(Activation("softmax"))
    sgd = SGD(lr=learning_rate, momentum=momentum, nesterov=True, decay=1e-6)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=["accuracy"])
    return model
Project: nea    Author: nusnlp    | Project source | File source
def get_optimizer(args):

    clipvalue = 0
    clipnorm = 10

    if args.algorithm == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'sgd':
        optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adam':
        optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adamax':
        optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)

    return optimizer
Project: Nerve-Segmentation    Author: matthewzhou    | Project source | File source
def create_model(img_rows, img_cols):
    model = Sequential() #initialize model
    model.add(Convolution2D(4, 3, 3, border_mode='same', activation='relu', init='he_normal',
                            input_shape=(1, img_rows, img_cols)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(8, 3, 3, border_mode='same', activation='relu', init='he_normal'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(2))
    model.add(Activation('softmax'))
    adm = Adamax()
    #sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=adm, loss='categorical_crossentropy')
    return model
Project: ABiViRNet    Author: lvapeab    | Project source | File source
def setOptimizer(self, **kwargs):

        """
        Sets a new optimizer for the Translation_Model.
        :param **kwargs:
        """

        # compile differently depending if our model is 'Sequential' or 'Graph'
        if self.verbose > 0:
            logging.info("Preparing optimizer and compiling.")
        if self.params['OPTIMIZER'].lower() == 'adam':
            optimizer = Adam(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'rmsprop':
            optimizer = RMSprop(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'nadam':
            optimizer = Nadam(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'adadelta':
            optimizer = Adadelta(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        elif self.params['OPTIMIZER'].lower() == 'sgd':
            optimizer = SGD(lr=self.params['LR'], clipnorm=self.params['CLIP_C'])
        else:
            logging.info('\tWARNING: The modification of the LR is not implemented for the chosen optimizer.')
            optimizer = eval(self.params['OPTIMIZER'])
        self.model.compile(optimizer=optimizer, loss=self.params['LOSS'],
                           sample_weight_mode='temporal' if self.params['SAMPLE_WEIGHTS'] else None)
Project: keras    Author: NVIDIA    | Project source | File source
def make_teacher_model(train_data, validation_data, nb_epoch=3):
    '''Train a simple CNN as teacher model.
    '''
    model = Sequential()
    model.add(Conv2D(64, 3, 3, input_shape=input_shape,
                     border_mode='same', name='conv1'))
    model.add(MaxPooling2D(name='pool1'))
    model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2'))
    model.add(MaxPooling2D(name='pool2'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(64, activation='relu', name='fc1'))
    model.add(Dense(nb_class, activation='softmax', name='fc2'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=['accuracy'])

    train_x, train_y = train_data
    history = model.fit(train_x, train_y, nb_epoch=nb_epoch,
                        validation_data=validation_data)
    return model, history
Project: Kaggle-DSB    Author: Wrosinski    | Project source | File source
def preds3d_baseline(width):

    learning_rate = 5e-5
    #optimizer = SGD(lr=learning_rate, momentum = 0.9, decay = 1e-3, nesterov = True)
    optimizer = Adam(lr=learning_rate)

    inputs = Input(shape=(1, 136, 168, 168))
    conv1 = Convolution3D(width, 3, 3, 3, activation = 'relu', border_mode='same')(inputs)
    conv1 = BatchNormalization(axis = 1)(conv1)
    conv1 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(conv1)
    conv1 = BatchNormalization(axis = 1)(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv1)

    conv2 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(pool1)
    conv2 = BatchNormalization(axis = 1)(conv2)
    conv2 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(conv2)
    conv2 = BatchNormalization(axis = 1)(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv2)

    conv3 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(pool2)
    conv3 = BatchNormalization(axis = 1)(conv3)
    conv3 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv3)
    conv3 = BatchNormalization(axis = 1)(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv3)

    output = GlobalAveragePooling3D()(pool3)
    output = Dense(2, activation='softmax', name = 'predictions')(output)
    model3d = Model(inputs, output)
    model3d.compile(loss='categorical_crossentropy', optimizer = optimizer, metrics = ['accuracy'])
    return model3d
Project: detection-2016-nipsws    Author: imatge-upc    | Project source | File source
def obtain_compiled_vgg_16(vgg_weights_path):
    model = vgg_16(vgg_weights_path)
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    return model
Project: dcgan    Author: kyloon    | Project source | File source
def generate(batch_size, pretty=False):
    generator = generator_model()
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    generator.load_weights('generator_weights')
    if pretty:
        discriminator = discriminator_model()
        discriminator.compile(loss='binary_crossentropy', optimizer="SGD")
        discriminator.load_weights('discriminator_weights')
        noise = np.zeros((batch_size*20, 100))
        for i in range(batch_size*20):
            noise[i, :] = np.random.uniform(-1, 1, 100)
        generated_images = generator.predict(noise, verbose=1)
        d_pret = discriminator.predict(generated_images, verbose=1)
        index = np.arange(0, batch_size*20)
        index.resize((batch_size*20, 1))
        pre_with_index = list(np.append(d_pret, index, axis=1))
        pre_with_index.sort(key=lambda x: x[0], reverse=True)
        pretty_images = np.zeros((batch_size, 1) +
                               (generated_images.shape[2:]), dtype=np.float32)
        for i in range(int(batch_size)):
            idx = int(pre_with_index[i][1])
            pretty_images[i, 0, :, :] = generated_images[idx, 0, :, :]
        image = combine_images(pretty_images)
    else:
        noise = np.zeros((batch_size, 100))
        for i in range(batch_size):
            noise[i, :] = np.random.uniform(-1, 1, 100)
        generated_images = generator.predict(noise, verbose=1)
        image = combine_images(generated_images)
    image = image*127.5+127.5
    Image.fromarray(image.astype(np.uint8)).save(
        "images/generated_image.png")
Project: shenlan    Author: vector-1127    | Project source | File source
def generate(BATCH_SIZE, nice=False):
    (X_train, Y_train) = get_data('test')
    #print(np.shape(X_train))
    X_train = (X_train.astype(np.float32) - 127.5)/127.5
    Y_train = (Y_train.astype(np.float32) - 127.5)/127.5

    generator = generator_model()
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    generator.load_weights('generator')
    if nice:
        discriminator = discriminator_model()
        discriminator.compile(loss='binary_crossentropy', optimizer="SGD")
        discriminator.load_weights('discriminator')

        generated_images = generator.predict(X_train, verbose=1)
        d_pret = discriminator.predict(generated_images, verbose=1)
        index = np.arange(0, BATCH_SIZE*20)
        index.resize((BATCH_SIZE*20, 1))
        pre_with_index = list(np.append(d_pret, index, axis=1))
        pre_with_index.sort(key=lambda x: x[0], reverse=True)
        nice_images = np.zeros((BATCH_SIZE, 1) + (generated_images.shape[2:]), dtype=np.float32)
        for i in range(int(BATCH_SIZE)):
            idx = int(pre_with_index[i][1])
            nice_images[i, 0, :, :] = generated_images[idx, 0, :, :]
        image = combine_images(nice_images)
    else:
        generated_images = generator.predict(X_train)
        image = combine_images(generated_images)
    image = image*127.5+127.5
    image = np.swapaxes(image,0,2)
    cv2.imwrite('generated.png', image)
Project: Deep-Learning-with-Keras    Author: PacktPublishing    | Project source | File source
def generate(BATCH_SIZE, nice=False):
    generator = generator_model()
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    generator.load_weights('generator')
    if nice:
        discriminator = discriminator_model()
        discriminator.compile(loss='binary_crossentropy', optimizer="SGD")
        discriminator.load_weights('discriminator')
        noise = np.zeros((BATCH_SIZE*20, 100))
        for i in range(BATCH_SIZE*20):
            noise[i, :] = np.random.uniform(-1, 1, 100)
        generated_images = generator.predict(noise, verbose=1)
        d_pret = discriminator.predict(generated_images, verbose=1)
        index = np.arange(0, BATCH_SIZE*20)
        index.resize((BATCH_SIZE*20, 1))
        pre_with_index = list(np.append(d_pret, index, axis=1))
        pre_with_index.sort(key=lambda x: x[0], reverse=True)
        nice_images = np.zeros((BATCH_SIZE, 1) +
                               (generated_images.shape[2:]), dtype=np.float32)
        for i in range(int(BATCH_SIZE)):
            idx = int(pre_with_index[i][1])
            nice_images[i, 0, :, :] = generated_images[idx, 0, :, :]
        image = combine_images(nice_images)
    else:
        noise = np.zeros((BATCH_SIZE, 100))
        for i in range(BATCH_SIZE):
            noise[i, :] = np.random.uniform(-1, 1, 100)
        generated_images = generator.predict(noise, verbose=1)
        image = combine_images(generated_images)
    image = image*127.5+127.5
    Image.fromarray(image.astype(np.uint8)).save(
        "generated_image.png")
Project: 3HAN    Author: ni9elf    | Project source | File source
def fhan2_max(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):
    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=False, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)


    Si = GlobalMaxPooling1D()(hij)

    wordEncoder = Model(wordInputs, Si)

    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    #sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(docInputs) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    Vb = GlobalMaxPooling1D()(hi)

    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'glorot_uniform', name="dense")(Vb)
    model = Model(inputs=[docInputs] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model, wordEncoder
Project: 3HAN    Author: ni9elf    | Project source | File source
def han2(MAX_NB_WORDS, MAX_WORDS, MAX_SENTS, EMBEDDING_DIM, WORDGRU, embedding_matrix, DROPOUTPER):

    wordInputs = Input(shape=(MAX_WORDS,), name="wordInputs", dtype='float32')

    #print 'in han2 max-nb-words'
    #print MAX_NB_WORDS

    wordEmbedding = Embedding(MAX_NB_WORDS, EMBEDDING_DIM, weights=[embedding_matrix], mask_zero=True, trainable=True, name='wordEmbedding')(wordInputs) 

    hij = Bidirectional(GRU(WORDGRU, return_sequences=True), name='gru1')(wordEmbedding)

    alpha_its, Si = AttentionLayer(name='att1')(hij)

    #wordDrop = Dropout(DROPOUTPER, name='wordDrop')(Si)

    wordEncoder = Model(wordInputs, Si)
    # -----------------------------------------------------------------------------------------------

    docInputs = Input(shape=(None, MAX_WORDS), name='docInputs' ,dtype='float32')

    sentenceMasking = Masking(mask_value=0.0, name='sentenceMasking')(docInputs)

    sentEncoding = TimeDistributed(wordEncoder, name='sentEncoding')(sentenceMasking) 

    hi = Bidirectional(GRU(WORDGRU, return_sequences=True), merge_mode='concat', name='gru2')(sentEncoding)

    alpha_s, Vb = AttentionLayer(name='att2')(hi)

    #sentDrop = Dropout(DROPOUTPER, name='sentDrop')(Vb)

    v6 = Dense(1, activation="sigmoid", kernel_initializer = 'he_normal', name="dense")(Vb)
    model = Model(inputs=[docInputs] , outputs=[v6])

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

    return model, wordEncoder
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def create_ft_extractor(self, type_mod, weights_path):
        """Extract the features from x using a convnet model."""
#        model = convnet(type_mod, weights_path=weights_path, heatmap=False,
#                        W_regularizer=None,
#                        activity_regularizer=None,
#                        dense=False)
#        sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
#        model.compile(optimizer=sgd, loss="mse")
#        print "Summary:", model.summary()
        model = None

        return model
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def baseline_model():
    # create model
    input_shape = (1, 50, 50)
    model = Sequential()
    model.add(Conv2D(16, (3, 3),
                 activation='sigmoid',
                 strides=(1, 1),
                 data_format='channels_first',
                 padding='same',
                 input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))
    model.add(Conv2D(48, kernel_size=(3, 3),
                 activation='sigmoid',
                 strides=(1, 1),
                 data_format="channels_first",
                 padding="same",
                 input_shape=input_shape))
    model.add(Conv2D(64, kernel_size=(3, 3),
                 activation='sigmoid',
                 strides=(1, 1),
                 data_format="channels_first",
                 padding="same",
                 input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_first'))
    model.add(Conv2D(64, kernel_size=(3, 3),
                 activation='sigmoid',
                 strides=(1, 1),
                 data_format="channels_first",
                 padding="same",
                 input_shape=input_shape))
    model.add(Flatten())
    model.add(Dense(64, activation='sigmoid'))
    model.add(Dense(68*2, activation='tanh'))
    # Compile model
    sgd = SGD(lr=1e-4, momentum=0.9, decay=1e-6, nesterov=False)
    model.compile(loss='mean_squared_error', optimizer=sgd)
    return model
Project: PSPNet-Keras-tensorflow    Author: Vladkryvoruchko    | Project source | File source
def build_pspnet(nb_classes, resnet_layers, input_shape, activation='softmax'):
    """Build PSPNet."""
    print("Building a PSPNet based on ResNet %i expecting inputs of shape %s predicting %i classes" % (resnet_layers, input_shape, nb_classes))

    inp = Input((input_shape[0], input_shape[1], 3))
    res = ResNet(inp, layers=resnet_layers)
    psp = build_pyramid_pooling_module(res, input_shape)

    x = Conv2D(512, (3, 3), strides=(1, 1), padding="same", name="conv5_4",
               use_bias=False)(psp)
    x = BN(name="conv5_4_bn")(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)

    x = Conv2D(nb_classes, (1, 1), strides=(1, 1), name="conv6")(x)
    x = Lambda(Interp, arguments={'shape': (input_shape[0], input_shape[1])})(x)
    x = Activation('softmax')(x)

    model = Model(inputs=inp, outputs=x)

    # Solver
    sgd = SGD(lr=learning_rate, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Project: TC-Lung_nodules_detection    Author: Shicoder    | Project source | File source
def get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=None, features=False, mal=False) -> Model:
    inputs = Input(shape=input_shape, name="input_1")
    x = inputs
    #x = AveragePooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1), border_mode="same")(x)
    x = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same', name='conv1', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), border_mode='valid', name='pool1')(x)

    # 2nd layer group
    x = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same', name='conv2', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool2')(x)
    #if USE_DROPOUT:
     #   x = Dropout(p=0.3)(x)

    # 3rd layer group
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3a', subsample=(1, 1, 1))(x)
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3b', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool3')(x)
    #if USE_DROPOUT:
     #   x = Dropout(p=0.4)(x)

    # 4th layer group
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4a', subsample=(1, 1, 1))(x)
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4b', subsample=(1, 1, 1),)(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool4')(x)
    #if USE_DROPOUT:
     #   x = Dropout(p=0.5)(x)

    last64 = Convolution3D(64, 2, 2, 2, activation="relu", name="last_64")(x)
    out_class = Convolution3D(1, 1, 1, 1, activation="sigmoid", name="out_class_last")(last64)
    out_class = Flatten(name="out_class")(out_class)

    out_malignancy = Convolution3D(1, 1, 1, 1, activation=None, name="out_malignancy_last")(last64)
    out_malignancy = Flatten(name="out_malignancy")(out_malignancy)

    model = Model(input=inputs, output=[out_class, out_malignancy])
    if load_weight_path is not None:
        model.load_weights(load_weight_path, by_name=False)
    #model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True), loss={"out_class": "binary_crossentropy", "out_malignancy": mean_absolute_error}, metrics={"out_class": [binary_accuracy, binary_crossentropy], "out_malignancy": mean_absolute_error})
    model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True), loss={"out_class": "binary_crossentropy"}, metrics={"out_class": [binary_accuracy, binary_crossentropy]})
    if features:
        model = Model(input=inputs, output=[last64])
    model.summary(line_length=140)

    return model
Project: keras    Author: GeekLiB    | Project source | File source
def test_sgd():
    sgd = SGD(lr=0.01, momentum=0.9, nesterov=True)
    _test_optimizer(sgd)
Project: keras    Author: GeekLiB    | Project source | File source
def test_ReduceLROnPlateau():
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(nb_class, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    model = make_model()

    # This should reduce the LR after the first epoch (due to high epsilon).
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=10, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())

    model = make_model()
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=0, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
Project: deer    Author: VinF    | Project source | File source
def _compile(self):
        """ compile self.q_vals
        """
        if (self._update_rule=="sgd"):
            optimizer = SGD(lr=self._lr, momentum=self._momentum, nesterov=False)
        elif (self._update_rule=="rmsprop"):
            optimizer = RMSprop(lr=self._lr, rho=self._rho, epsilon=self._rms_epsilon)
        else:
            raise Exception('The update_rule '+self._update_rule+' is not implemented.')

        self.q_vals.compile(optimizer=optimizer, loss='mse')
Project: ai-bs-summer17    Author: uchibe    | Project source | File source
def createModel(self):

        model = Sequential()
        model.add(Conv2D(16, (3, 3), strides=(2, 2), input_shape=(self.img_rows, self.img_cols, self.img_channels)))
        model.add(Activation('relu'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Conv2D(16, (3, 3), strides=(2, 2)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation('relu'))
        # model.add(Dropout(0.5))
        model.add(Dense(self.output_size))
        # model.add(Activation('softmax'))
        # model.compile(RMSprop(lr=self.learningRate), 'MSE')
        # sgd = SGD(lr=self.learningRate)
        adam = Adam(lr=self.learningRate)
        model.compile(loss='mse', optimizer=adam)
        model.summary()

        return model
Project: Q-A-Recommender-System-Machine-Learning    Author: Yuanxiang-Wu    | Project source | File source
def model_Train(X_tr, Y_tr, arch, actfn='sigmoid', last_act='sigmoid', reg_coeff=0.0,
                num_epoch=100, batch_size=1000, sgd_lr=1e-5, sgd_decay=0.0, sgd_mom=0.0,
                    sgd_Nesterov=False, EStop=False):
    call_ES = EarlyStopping(monitor='val_acc', patience=6, mode='auto')
    model = gen_Model(num_units=arch, actfn=actfn, reg_coeff=reg_coeff, last_act=last_act)
    sgd = SGD(lr=sgd_lr, decay=sgd_decay, momentum=sgd_mom, nesterov=sgd_Nesterov)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    if EStop:
        model.fit(X_tr, Y_tr, nb_epoch=num_epoch, batch_size=batch_size, callbacks=[call_ES],
                  validation_split=0.1, validation_data=None, shuffle=True)
    else:
        model.fit(X_tr, Y_tr, batch_size=batch_size, nb_epoch=num_epoch, shuffle=True, verbose=1,
                  show_accuracy=True, validation_split=0.2)
    return model