Python keras.optimizers module: Adagrad() example source code

From open-source Python projects, we have extracted the following 29 code examples that illustrate how to use keras.optimizers.Adagrad().
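Most of the snippets below date from Keras 1.x/2.x, where the learning-rate argument is spelled `lr`; in current Keras it is `learning_rate`. As a minimal, self-contained sketch of the optimizer in use (assuming the Keras 2 API):

# Minimal sketch: compiling a small model with Adagrad (Keras 2 API assumed).
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adagrad

model = Sequential()
model.add(Dense(10, activation='relu', input_shape=(20,)))
model.add(Dense(1, activation='sigmoid'))

# Adagrad scales each parameter's step by its accumulated squared gradients,
# so frequently-updated weights get smaller steps over time.
model.compile(optimizer=Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
              loss='binary_crossentropy', metrics=['accuracy'])

X = np.random.rand(32, 20)
y = np.random.randint(0, 2, size=(32, 1))
model.fit(X, y, epochs=1, batch_size=8, verbose=0)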

Project: KATE    Author: hugochan    | Project source | File source
def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, feature_weights=None):
        print('Training autoencoder')
        optimizer = Adadelta(lr=1.5)
        # optimizer = Adam()
        # optimizer = Adagrad()
        if feature_weights is None:
            self.autoencoder.compile(optimizer=optimizer, loss='binary_crossentropy') # kld, binary_crossentropy, mse
        else:
            print('Using weighted loss')
            self.autoencoder.compile(optimizer=optimizer, loss=weighted_binary_crossentropy(feature_weights)) # kld, binary_crossentropy, mse

        self.autoencoder.fit(train_X[0], train_X[1],
                        nb_epoch=nb_epoch,
                        batch_size=batch_size,
                        shuffle=True,
                        validation_data=(val_X[0], val_X[1]),
                        callbacks=[
                                    ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                                    EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                                    # ModelCheckpoint(self.model_save_path, monitor='val_loss', save_best_only=True, verbose=0),
                        ]
                        )

        return self
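`weighted_binary_crossentropy(feature_weights)` is a helper defined elsewhere in the KATE project. A hypothetical sketch of what such a per-feature weighted loss factory typically looks like (the actual KATE implementation may differ):

# Hypothetical sketch of a per-feature weighted binary cross-entropy factory;
# the actual KATE implementation may differ.
from keras import backend as K

def weighted_binary_crossentropy(feature_weights):
    def loss(y_true, y_pred):
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        bce = -(y_true * K.log(y_pred) + (1 - y_true) * K.log(1 - y_pred))
        # scale each output dimension by its feature weight, then average
        return K.mean(bce * feature_weights, axis=-1)
    return loss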
Project: CCIR    Author: xiaogang00    | Project source | File source
def get_graph(num_users, num_items, latent_dim):

    model = Graph()
    model.add_input(name='user_input', input_shape=(num_users,))
    model.add_input(name='positive_item_input', input_shape=(num_items,))
    model.add_input(name='negative_item_input', input_shape=(num_items,))

    model.add_node(layer=Dense(latent_dim, input_shape = (num_users,)),
                   name='user_latent',
                   input='user_input')
    model.add_shared_node(layer=Dense(latent_dim, input_shape = (num_items,)), 
                          name='item_latent', 
                          inputs=['positive_item_input', 'negative_item_input'],
                          merge_mode=None, 
                          outputs=['positive_item_latent', 'negative_item_latent'])

    model.add_node(layer=Activation('linear'), name='user_pos', inputs=['user_latent', 'positive_item_latent'], merge_mode='dot', dot_axes=1)
    model.add_node(layer=Activation('linear'), name='user_neg', inputs=['user_latent', 'negative_item_latent'], merge_mode='dot', dot_axes=1)

    model.add_output(name='triplet_loss_out', inputs=['user_pos', 'user_neg'])
    model.compile(loss={'triplet_loss_out': ranking_loss}, optimizer=Adam())  # alternative: Adagrad(lr=0.1, epsilon=1e-06)

    return model
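`Graph()` was removed after Keras 1.0, so the snippet above only runs on very old Keras. A rough equivalent in the Keras 2 functional API, assuming the project's `ranking_loss` is in scope; the commented-out Adagrad settings are used here since that is this page's topic:

# Hedged sketch: the same triplet structure in the Keras 2 functional API.
# `ranking_loss` is assumed to be the loss defined in the original project.
from keras.layers import Input, Dense, Dot, Concatenate
from keras.models import Model
from keras.optimizers import Adagrad

def get_functional_graph(num_users, num_items, latent_dim):
    user_input = Input(shape=(num_users,), name='user_input')
    pos_input = Input(shape=(num_items,), name='positive_item_input')
    neg_input = Input(shape=(num_items,), name='negative_item_input')

    user_latent = Dense(latent_dim, name='user_latent')(user_input)
    item_latent = Dense(latent_dim, name='item_latent')  # shared by both items
    pos_latent = item_latent(pos_input)
    neg_latent = item_latent(neg_input)

    user_pos = Dot(axes=1, name='user_pos')([user_latent, pos_latent])
    user_neg = Dot(axes=1, name='user_neg')([user_latent, neg_latent])
    out = Concatenate(name='triplet_loss_out')([user_pos, user_neg])

    model = Model([user_input, pos_input, neg_input], out)
    model.compile(loss=ranking_loss, optimizer=Adagrad(lr=0.1, epsilon=1e-06))
    return model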
Project: pydl    Author: rafaeltg    | Project source | File source
def get_optimizer(self):

        if self.opt == 'sgd':
            return k_opt.SGD(lr=self.learning_rate, momentum=self.momentum)

        if self.opt == 'rmsprop':
            return k_opt.RMSprop(lr=self.learning_rate)

        if self.opt == 'adagrad':
            return k_opt.Adagrad(lr=self.learning_rate)

        if self.opt == 'adadelta':
            return k_opt.Adadelta(lr=self.learning_rate)

        if self.opt == 'adam':
            return k_opt.Adam(lr=self.learning_rate)

        raise Exception('Invalid optimization function - %s' % self.opt)
Project: deep-learning-with-Keras    Author: decordoba    | Project source | File source
def __init__(self):
        filters1 = [16, 32, 64]  # filters1 = [4, 8, 16, 32, 64, 128, 256]
        filters2 = [16, 32, 64]  # filters2 = [4, 8, 16, 32, 64, 128, 256]
        losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]  # losses1 = [losses.MSE, losses.MAE, losses.hinge, losses.categorical_crossentropy]
        optimizers1 = [optimizers.Adam()]  # optimizers1 = [optimizers.Adadelta(), optimizers.Adagrad(), optimizers.Adam(), optimizers.Adamax(), optimizers.SGD(), optimizers.RMSprop()]
        units1 = [16, 32, 64]  # units1 = [4, 8, 16, 32, 64, 128, 256]
        kernel_sizes1 = [(3, 3)]  # kernel_sizes = [(3, 3), (5, 5)]
        dropouts1 = [0.25]  # dropouts1 = [0.25, 0.5, 0.75]
        dropouts2 = [0.5]  # dropouts2 = [0.25, 0.5, 0.75]
        pool_sizes1 = [(2, 2)]  # pool_sizes1 = [(2, 2)]

        # create standard experiments structure
        self.experiments = {"filters1": filters1,
                            "filters2": filters2,
                            "losses1": losses1,
                            "units1": units1,
                            "optimizers1": optimizers1,
                            "kernel_sizes1": kernel_sizes1,
                            "dropouts1": dropouts1,
                            "dropouts2": dropouts2,
                            "pool_sizes1": pool_sizes1}
Project: auto_ml    Author: ClimbsRocks    | Project source | File source
def get_optimizer(name='Adadelta'):
    if name == 'SGD':
        return optimizers.SGD(clipnorm=1.)
    if name == 'RMSprop':
        return optimizers.RMSprop(clipnorm=1.)
    if name == 'Adagrad':
        return optimizers.Adagrad(clipnorm=1.)
    if name == 'Adadelta':
        return optimizers.Adadelta(clipnorm=1.)
    if name == 'Adam':
        return optimizers.Adam(clipnorm=1.)
    if name == 'Adamax':
        return optimizers.Adamax(clipnorm=1.)
    if name == 'Nadam':
        return optimizers.Nadam(clipnorm=1.)

    return optimizers.Adam(clipnorm=1.)
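Every branch above passes `clipnorm=1.`, which rescales gradients whose norm exceeds 1 back down to norm 1 before the update step; it is a cheap, optimizer-agnostic guard against exploding gradients that leaves well-behaved gradients untouched.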
Project: nea    Author: nusnlp    | Project source | File source
def get_optimizer(args):
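    # In Keras 1.x/2.x the clipping guards check for values > 0, so the
    # clipvalue=0 below disables value clipping while clipnorm=10 is applied.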

    clipvalue = 0
    clipnorm = 10

    if args.algorithm == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'sgd':
        optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adam':
        optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adamax':
        optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)

    return optimizer
Project: kaggle-dsg-qualification    Author: Ignotus    | Project source | File source
def make_predictions(shape, model):

    train_data, train_ids, valid_data, valid_labels, test_data, test_ids = p.get_roof_data(shape=(shape,shape))

    print('\tInitializing model')
    opt = Adagrad(lr=LR)
    model = build_model(opt, model, shape)

    print('\tCreating predictions')
    pred = model.predict_classes(test_data,
                                 batch_size=20,
                                 verbose=0)

    pred_valid = model.predict_classes(valid_data,
                                       batch_size=20,
                                       verbose=0)

    pred = np.array([x + 1 for x in list(pred)])
    pred_valid = np.array([x + 1 for x in list(pred_valid)])
    print('\tWriting to file')
    make_prediction_file(test_ids, pred, 'vgg_predictions',
                         valid_labels=valid_labels,
                         valid_predictions=pred_valid)
Project: Kutils    Author: ishank26    | Project source | File source
def my_model(X_train, y_train, X_test, y_test):
    ############ model params ################
    line_length = 248  # seq size
    train_char = 58
    hidden_neurons = 512  # hidden neurons
    batch = 64  # batch_size
    no_epochs = 3
    ################### Model ################

    ######### begin model ########
    model = Sequential()
    # layer 1
    model.add(LSTM(hidden_neurons, return_sequences=True,
                   input_shape=(line_length, train_char)))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # layer 2
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # layer 3
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # fc layer
    model.add(TimeDistributed(Dense(train_char, activation='softmax')))
    model.load_weights("weights/model_maha1_noep50_batch64_seq_248.hdf5")
    ########################################################################
    checkpoint = ModelCheckpoint("weights/hypmodel2_maha1_noep{0}_batch{1}_seq_{2}.hdf5".format(
        no_epochs, batch, line_length), monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='min')

    initlr = 0.00114
    adagrad = Adagrad(lr=initlr, epsilon=1e-08,
                      clipvalue={{choice([0, 1, 2, 3, 4, 5, 6, 7])}})
    model.compile(optimizer=adagrad,
                  loss='categorical_crossentropy', metrics=['accuracy'])
    history = History()
    # fit model
    model.fit(X_train, y_train, batch_size=batch, nb_epoch=no_epochs,
              validation_split=0.2, callbacks=[history, checkpoint])

    score, acc = model.evaluate(X_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
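The double-brace `{{choice([...])}}` expressions above are hyperas search-space templates, not plain Python: hyperas rewrites the function body and lets hyperopt sample each choice. A hedged sketch of how such a function is typically driven, assuming a companion `data()` function that returns `X_train, y_train, X_test, y_test`:

# Hedged sketch: running a hyperas-templated model function with hyperopt.
# `my_model` is the function above; `data` is an assumed provider function.
from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=my_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())
print('Best hyperparameter assignment:', best_run)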
Project: Kutils    Author: ishank26    | Project source | File source
def my_model(dropout):
    ############ model params ################
    line_length = 248  # seq size
    train_char = 58
    hidden_neurons = 512  # hidden neurons
    batch = 64  # batch_size
    no_epochs = 5
    ################### Model ################
    model = Sequential()
    # layer 1
    model.add(LSTM(hidden_neurons, return_sequences=True,
                   input_shape=(line_length, train_char)))
    model.add(Dropout(dropout))
    # layer 2
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout(dropout))
    # layer 3
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout(dropout))
    model.add(Reshape((248, 512)))
    # fc layer
    model.add(TimeDistributed(Dense(58, activation='softmax')))
    # model.load_weights("weights/model_maha1_noep50_batch64_seq_248.hdf5")
    # model.layers.pop()
    # model.layers.pop()
    # model.add(Dropout(dropout))
    #model.add(TimeDistributed(Dense(train_char, activation='softmax')))
    initlr = 0.00114
    adagrad = Adagrad(lr=initlr, epsilon=1e-08)
    model.compile(optimizer=adagrad,
                  loss='categorical_crossentropy', metrics=['accuracy'])
    ###load weights####
    return model
Project: keras    Author: GeekLiB    | Project source | File source
def test_adagrad():
    _test_optimizer(Adagrad())
    _test_optimizer(Adagrad(decay=1e-3))
Project: aetros-cli    Author: aetros    | Project source | File source
def get_learning_rate(self):

        if hasattr(self.model, 'optimizer'):
            config = self.model.optimizer.get_config()

            from keras.optimizers import Adadelta, Adam, Adamax, Adagrad, RMSprop, SGD

            if isinstance(self.model.optimizer, Adadelta) or isinstance(self.model.optimizer, Adam) \
                    or isinstance(self.model.optimizer, Adamax) or isinstance(self.model.optimizer, Adagrad)\
                    or isinstance(self.model.optimizer, RMSprop) or isinstance(self.model.optimizer, SGD):
                return config['lr'] * (1. / (1. + config['decay'] * float(K.get_value(self.model.optimizer.iterations))))

            elif 'lr' in config:
                return config['lr']
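The arithmetic above mirrors Keras's built-in time-based decay, where the effective rate after `iterations` updates is `lr / (1 + decay * iterations)`. A standalone check of the schedule:

# Sketch of the time-based decay schedule the snippet reconstructs.
def effective_lr(lr, decay, iterations):
    return lr * (1. / (1. + decay * float(iterations)))

print(effective_lr(0.01, 1e-3, 1000))  # 0.005 after 1000 updates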
Project: keras-customized    Author: ambrite    | Project source | File source
def test_adagrad():
    _test_optimizer(Adagrad())
    _test_optimizer(Adagrad(decay=1e-3))
Project: StockRecommendSystem    Author: doncat99    | Project source | File source
def buildnetwork(self):
        model = Sequential()
        model.add(lstm(20, dropout_W=0.2, input_shape=(self.seq_len, self.n_feature)))  # `lstm` is presumably this project's alias for keras.layers.LSTM
        # model.add(LSTM(20, dropout=0.2, input_shape=(int(self.seq_len), int(self.n_feature))))
        model.add(Dense(1, activation=None))
        model.compile(loss='mean_squared_error', optimizer=Adagrad(lr=0.002, clipvalue=10), metrics=['mean_squared_error'])

        return model
Project: taxi    Author: xuguanggen    | Project source | File source
def main():
    batches_per_epoch = 250
    generate_size = 200
    nb_epoch = 20
    print('1. Loading data.............')
    te_con_feature,te_emb_feature,te_seq_feature,vocabs_size = load_test_dataset()

    n_con = te_con_feature.shape[1]
    n_emb = te_emb_feature.shape[1]
    print('1.1 merge con_feature,emb_feature,seq_feature.....')
    test_feature = prepare_inputX(te_con_feature,te_emb_feature,te_seq_feature)

    print('2. cluster.........')
    cluster_centers = h5py.File('cluster.h5','r')['cluster'][:]

    print('3. Building model..........')
    model = build_lstm(n_con,n_emb,vocabs_size,dis_size,emb_size,cluster_centers.shape[0])
    checkPoint = ModelCheckpoint('weights/' + model_name +'.h5',save_best_only=True)
    earlystopping = EarlyStopping(patience = 500)
    model.compile(loss=hdist, optimizer='rmsprop')  # alternatives: loss='mse', optimizer=Adagrad
    tr_generator = train_generator(generate_size)
    model.fit_generator(
        tr_generator,
        samples_per_epoch = batches_per_epoch * generate_size,
        nb_epoch = nb_epoch,
        validation_data = getValData(),
        verbose = 1,
        callbacks = [checkPoint,earlystopping]
    )

    print('4. Predicting result .............')
    te_predict = model.predict(test_feature)
    save_results(te_predict,result_csv_path)
Project: keras    Author: NVIDIA    | Project source | File source
def test_adagrad():
    _test_optimizer(Adagrad())
    _test_optimizer(Adagrad(decay=1e-3))
Project: Papers2Code    Author: rainer85ah    | Project source | File source
def compile(self, optimizer='sgd'):

        optimizer_dicc = {'sgd': optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
                          'rmsprop': optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),
                          'adagrad': optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
                          'adadelta': optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                          'adam': optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)}

        self.model.compile(optimizer=optimizer_dicc[optimizer], loss='categorical_crossentropy', metrics=['accuracy'])
        return self.model
Project: Papers2Code    Author: rainer85ah    | Project source | File source
def compile(self, optimizer='sgd'):

        optimizer_dicc = {'sgd': optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
                          'rmsprop': optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),
                          'adagrad': optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
                          'adadelta': optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                          'adam': optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)}

        self.model.compile(optimizer=optimizer_dicc[optimizer], loss='categorical_crossentropy', metrics=['accuracy'])
        return self.model
Project: Papers2Code    Author: rainer85ah    | Project source | File source
def compile(self, optimizer='sgd'):

        optimizer_dicc = {'sgd': optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
                          'rmsprop': optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),
                          'adagrad': optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
                          'adadelta': optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                          'adam': optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)}

        self.model.compile(optimizer=optimizer_dicc[optimizer], loss='categorical_crossentropy', metrics=['accuracy'])
        return self.model
Project: Papers2Code    Author: rainer85ah    | Project source | File source
def compile(self, optimizer='sgd'):

        optimizer_dicc = {'sgd': optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
                          'rmsprop': optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),
                          'adagrad': optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
                          'adadelta': optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                          'adam': optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)}

        self.model.compile(optimizer=optimizer_dicc[optimizer], loss='categorical_crossentropy', metrics=['accuracy'])
        return self.model
Project: Papers2Code    Author: rainer85ah    | Project source | File source
def compile(self, optimizer='sgd'):

        optimizer_dicc = {'sgd': optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
                          'rmsprop': optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),
                          'adagrad': optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
                          'adadelta': optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                          'adam': optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)}

        self.model.compile(optimizer=optimizer_dicc[optimizer], loss='categorical_crossentropy', metrics=['accuracy'])
        return self.model
Project: Papers2Code    Author: rainer85ah    | Project source | File source
def compile(self, optimizer='sgd'):

        optimizer_dicc = {'sgd': optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
                          'rmsprop': optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),
                          'adagrad': optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
                          'adadelta': optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                          'adam': optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)}

        self.model.compile(optimizer=optimizer_dicc[optimizer], loss='categorical_crossentropy', metrics=['accuracy'])
        return self.model
Project: Papers2Code    Author: rainer85ah    | Project source | File source
def compile(self, optimizer='sgd'):

        optimizer_dicc = {'sgd': optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
                          'rmsprop': optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),
                          'adagrad': optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
                          'adadelta': optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                          'adam': optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)}

        self.model.compile(optimizer=optimizer_dicc[optimizer], loss='categorical_crossentropy', metrics=['accuracy'])
        return self.model
Project: deep-coref    Author: clarkkev    | Project source | File source
def test_adagrad(self):
        print('test Adagrad')
        self.assertTrue(_test_optimizer(Adagrad()))
Project: Deep-Reinforcement-Learning-in-Stock-Trading    Author: shenyichen105    | Project source | File source
def buildnetwork(self):
        model = Sequential()
        model.add(lstm(20, dropout=0.2, input_shape=(self.seq_len, self.n_feature)))  # `lstm` is presumably this project's alias for keras.layers.LSTM
        model.add(Dense(1, activation=None))
        model.compile(loss='mean_squared_error', optimizer=Adagrad(lr=0.002, clipvalue=10), metrics=['mean_squared_error'])

        return model
Project: shenlan    Author: vector-1127    | Project source | File source
def train(BATCH_SIZE):
    (X_train, Y_train) = get_data('train')
    #print(np.shape(X_train))
    X_train = (X_train.astype(np.float32) - 127.5)/127.5
    Y_train = (Y_train.astype(np.float32) - 127.5)/127.5
    #X_train = X_train.reshape((X_train.shape[0], 1) + X_train.shape[1:])
    #Y_train = Y_train.reshape((Y_train.shape[0], 1) + Y_train.shape[1:])
    discriminator = discriminator_model()
    generator = generator_model()
    generator.summary()
    discriminator_on_generator = generator_containing_discriminator(generator, discriminator)
    d_optim = Adagrad(lr=0.005)
    g_optim = Adagrad(lr=0.005)
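    # Note: d_optim and g_optim above are created but unused; the compile
    # calls below pass the string 'rmsprop' instead.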
    generator.compile(loss='mse', optimizer="rmsprop")
    discriminator_on_generator.compile(loss=[generator_l1_loss,discriminator_on_generator_loss] , optimizer="rmsprop")
    discriminator.trainable = True
    discriminator.compile(loss=discriminator_loss, optimizer="rmsprop")

    for epoch in range(100):
        print("Epoch is", epoch)
        print("Number of batches", int(X_train.shape[0]/BATCH_SIZE))
        for index in range(int(X_train.shape[0]/BATCH_SIZE)):
            image_batch = Y_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]

            generated_images = generator.predict(X_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE])
            if index % 20 == 0:
                image = combine_images(generated_images)
                image = image*127.5+127.5
                image = np.swapaxes(image,0,2)
                cv2.imwrite(str(epoch)+"_"+str(index)+".png",image)      
                #Image.fromarray(image.astype(np.uint8)).save(str(epoch)+"_"+str(index)+".png")

            real_pairs = np.concatenate((X_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE,:,:,:],image_batch),axis=1) 
            fake_pairs = np.concatenate((X_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE,:,:,:],generated_images),axis=1)
            X = np.concatenate((real_pairs,fake_pairs))
            y = np.zeros((20,1,64,64)) #[1] * BATCH_SIZE + [0] * BATCH_SIZE
            d_loss = discriminator.train_on_batch(X, y)
            pred_temp = discriminator.predict(X)
            #print(np.shape(pred_temp))
            print("batch %d d_loss : %f" % (index, d_loss))
            discriminator.trainable = False
            g_loss = discriminator_on_generator.train_on_batch(X_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE,:,:,:], [image_batch,np.ones((10,1,64,64))] )
            discriminator.trainable = True
            print("batch %d g_loss : %f" % (index, g_loss[1]))
            if index % 20 == 0:
                generator.save_weights('generator', True)
                discriminator.save_weights('discriminator', True)
Project: crnn    Author: ultimate010    | Project source | File source
def build_model():
    main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
    embedding  = Embedding(max_features, embedding_dims,
                  weights=[np.matrix(W)], input_length=maxlen,
                  name='embedding')(main_input)

    embedding = Dropout(0.50)(embedding)

    conv4 = Conv1D(filters=nb_filter,
                          kernel_size=4,
                          padding='valid',
                          activation='relu',
                          strides=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_size=2,
                             name='maxConv4')(conv4)

    conv5 = Conv1D(filters=nb_filter,
                          kernel_size=5,
                          padding='valid',
                          activation='relu',
                          strides=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_size=2,
                            name='maxConv5')(conv5)

    # x = merge([maxConv4, maxConv5], mode='concat')
    x = keras.layers.concatenate([maxConv4, maxConv5])

    x = Dropout(0.15)(x)

    x = RNN(rnn_output_size)(x)

    x = Dense(hidden_dims, activation='relu', kernel_initializer='he_normal',
              kernel_constraint = maxnorm(3), bias_constraint=maxnorm(3),
              name='mlp')(x)

    x = Dropout(0.10, name='drop')(x)

    output = Dense(1, kernel_initializer='he_normal',
                   activation='sigmoid', name='output')(x)

    model = Model(inputs=main_input, outputs=output)
    model.compile(loss='binary_crossentropy',
                  # optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                  # optimizer=Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                  optimizer=Adagrad(lr=0.01, epsilon=1e-08, decay=1e-4),  # compile() needs an optimizer; one of the options must be active
                  metrics=["accuracy"])
    return model
Project: StockRecommendSystem    Author: doncat99    | Project source | File source
def lstm_model(self):
        model = Sequential()
        first = True
        for idx in range(len(self.paras.model['hidden_layers'])):
            if idx == (len(self.paras.model['hidden_layers']) - 1):
                model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=False))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))
            elif first:
                model.add(LSTM(input_shape=(None, int(self.paras.n_features)),
                               units=int(self.paras.model['hidden_layers'][idx]),
                               return_sequences=True))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))
                first = False
            else:
                model.add(LSTM(int(self.paras.model['hidden_layers'][idx]), return_sequences=True))
                model.add(Activation(self.paras.model['activation']))
                model.add(Dropout(self.paras.model['dropout']))

        if self.paras.model['optimizer'] == 'sgd':
            #optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
            optimizer = optimizers.SGD(lr=self.paras.model['learning_rate'], decay=1e-6, momentum=0.9, nesterov=True)
        elif self.paras.model['optimizer'] == 'rmsprop':
            #optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.RMSprop(lr=self.paras.model['learning_rate']/10, rho=0.9, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adagrad':
            #optimizer = optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.Adagrad(lr=self.paras.model['learning_rate'], epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adam':
            #optimizer = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
            optimizer = optimizers.Adam(lr=self.paras.model['learning_rate']/10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adadelta':
            optimizer = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'adamax':
            optimizer = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        elif self.paras.model['optimizer'] == 'nadam':
            optimizer = optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
        else:
            optimizer = optimizers.Adam(lr=self.paras.model['learning_rate']/10, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        # output layer
        model.add(Dense(units=self.paras.model['out_layer']))
        model.add(Activation(self.paras.model['out_activation']))
        model.compile(loss=self.paras.model['loss'], optimizer=optimizer, metrics=['accuracy'])

        return model
Project: character-identification    Author: emorynlp    | Project source | File source
def __init__(self, nb_emb_feats, embdim, dftdim, emb_nb_filters, dft_nb_filters):
        self.nb_emb_feats = nb_emb_feats
        self.emb_nb_filters = emb_nb_filters
        self.dft_nb_filters = dft_nb_filters
        self.embdim, self.dftdim = embdim, dftdim

        # Mention-Mention embedding vector
        inp_m1_emb = Input(shape=(1, nb_emb_feats, embdim))
        inp_m2_emb = Input(shape=(1, nb_emb_feats, embdim))

        conv_m_emb_1r = Convolution2D(emb_nb_filters, 1, embdim, activation='tanh')
        pool_m_emb_1r = MaxPooling2D(pool_size=(nb_emb_feats, 1))
        emb_m1_vector_1r = Reshape((emb_nb_filters,))(pool_m_emb_1r(conv_m_emb_1r(inp_m1_emb)))
        emb_m2_vector_1r = Reshape((emb_nb_filters,))(pool_m_emb_1r(conv_m_emb_1r(inp_m2_emb)))

        conv_m_emb_2r = Convolution2D(emb_nb_filters, 2, embdim, activation='tanh')
        pool_m_emb_2r = MaxPooling2D(pool_size=(nb_emb_feats-1, 1))
        emb_m1_vector_2r = Reshape((emb_nb_filters,))(pool_m_emb_2r(conv_m_emb_2r(inp_m1_emb)))
        emb_m2_vector_2r = Reshape((emb_nb_filters,))(pool_m_emb_2r(conv_m_emb_2r(inp_m2_emb)))

        conv_m_emb_3r = Convolution2D(emb_nb_filters, 3, embdim, activation='tanh')
        pool_m_emb_3r = MaxPooling2D(pool_size=(nb_emb_feats-2, 1))
        emb_m1_vector_3r = Reshape((emb_nb_filters,))(pool_m_emb_3r(conv_m_emb_3r(inp_m1_emb)))
        emb_m2_vector_3r = Reshape((emb_nb_filters,))(pool_m_emb_3r(conv_m_emb_3r(inp_m2_emb)))

        merged_vectors = merge([emb_m1_vector_1r, emb_m2_vector_1r, emb_m1_vector_2r, emb_m2_vector_2r, emb_m1_vector_3r, emb_m2_vector_3r], mode='concat')
        emb_m_matrix = Reshape((1, 2, emb_nb_filters))(merged_vectors)
        conv_mm_emb = Convolution2D(emb_nb_filters, 1, emb_nb_filters, activation='tanh')(emb_m_matrix)
        emb_mm_vector = Reshape((emb_nb_filters,))(Flatten()(MaxPooling2D(pool_size=(2, 1))(conv_mm_emb)))

        # Mention-Mention feature vector
        inp_m1_dft = Input(shape=(dftdim,))
        inp_m2_dft = Input(shape=(dftdim,))

        inp_mm_dft = Reshape((1, 2, dftdim))(merge([inp_m1_dft, inp_m2_dft], mode='concat'))
        conv_mm_dft = Convolution2D(dft_nb_filters, 1, dftdim, activation='tanh')(inp_mm_dft)
        dft_mm_vector = Reshape((dft_nb_filters,))(Flatten()(MaxPooling2D(pool_size=(2, 1))(conv_mm_dft)))

        # Regression
        prob = Dense(1, activation="sigmoid")(merge([emb_mm_vector, dft_mm_vector], mode="concat"))

        # Model compilation
        self.model = Model(input=[inp_m1_emb, inp_m2_emb, inp_m1_dft, inp_m2_dft], output=prob)
        self.model.compile(loss='mse', optimizer=Adagrad(lr=0.08))
Project: taxi    Author: xuguanggen    | Project source | File source
def main(result_csv_path,hasCluster):
    print('1. Loading Data.........')
    tr_con_feature,tr_emb_feature,tr_label,te_con_feature,te_emb_feature,vocabs_size = load_dataset()

    n_con = tr_con_feature.shape[1]
    n_emb = tr_emb_feature.shape[1]

    train_x = prepare_inputX(tr_con_feature,tr_emb_feature)
    test_x = prepare_inputX(te_con_feature,te_emb_feature)
    print('1.1 cluster.............')
    cluster_centers = []
    if hasCluster:
        f = h5py.File('cluster.h5','r')
        cluster_centers = f['cluster'][:]
    else:
        cluster_centers = cluster()

    print('2. Building model..........')
    model = build_mlp(n_con,n_emb,vocabs_size,dis_size,emb_size,cluster_centers.shape[0])
    checkPoint = ModelCheckpoint('weights/' + model_name +'.h5',save_best_only=True)
    model.compile(loss=hdist, optimizer='rmsprop')  # alternatives: loss='mse', optimizer=Adagrad
    model.fit(
        train_x,
        tr_label,
        nb_epoch = 2000, #1000 # 1500
        batch_size = 500, # 500 #400
        verbose = 1,
        validation_split = 0.3,
        callbacks = [checkPoint]
    )
    ##### dump model ########
    #json_string = model.to_json()
    #open('weights/'+ model_name +'.json','w').write(json_string)
    #model.save_weights('weights/'+ model_name + '.h5',overwrite=True,)

    ####### predict #############################
    print('3. Predicting result.........')
    te_predict = model.predict(test_x)
    df_test = pd.read_csv(Test_CSV_Path,header=0)
    result = pd.DataFrame()
    result['TRIP_ID'] = df_test['TRIP_ID']
    result['LATITUDE'] = te_predict[:,1]
    result['LONGITUDE'] = te_predict[:,0]
    result.to_csv(result_csv_path,index=False)