Python keras.callbacks module: TensorBoard() example source code

We have extracted the following code examples from open-source Python projects to illustrate how to use keras.callbacks.TensorBoard().

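Most of the examples below share the same basic pattern: construct a TensorBoard callback pointing at a log directory, pass it to fit(), and then inspect the run with `tensorboard --logdir`. As a self-contained orientation, here is a minimal sketch of that pattern (the toy model and the ./logs path are illustrative assumptions, not taken from any of the projects below):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import TensorBoard

# A tiny random binary-classification problem, just to have something to fit.
x = np.random.random((100, 8))
y = np.random.randint(2, size=(100, 1))

model = Sequential()
model.add(Dense(16, activation='relu', input_dim=8))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Event files are written under ./logs; view them with: tensorboard --logdir=./logs
tb = TensorBoard(log_dir='./logs', write_graph=True)
model.fit(x, y, epochs=2, batch_size=16, callbacks=[tb])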
Project: enet-keras    Author: PavlosMelissinos
def callbacks(self):
        """
        :return:
        """
        # TODO: Add ReduceLROnPlateau callback
        cbs = []

        tb = TensorBoard(log_dir=self.log_dir,
                         write_graph=True,
                         write_images=True)
        cbs.append(tb)

        best_model_filename = self.model_name + '_best.h5'
        best_model = os.path.join(self.checkpoint_dir, best_model_filename)
        save_best = ModelCheckpoint(best_model, save_best_only=True)
        cbs.append(save_best)

        checkpoints = ModelCheckpoint(filepath=self.checkpoint_file, verbose=1)
        cbs.append(checkpoints)

        reduce_lr = ReduceLROnPlateau(patience=1, verbose=1)
        cbs.append(reduce_lr)
        return cbs
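Note that ReduceLROnPlateau monitors val_loss by default, so a callback list like the one above assumes validation data is supplied to fit(); with patience=1, the learning rate is cut (by the default factor of 0.1) after a single epoch without improvement.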
Project: ntm_keras    Author: flomlo
def lengthy_test(model, testrange=[5,10,20,40,80], epochs=100, verbose=True):
    ts = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = LOG_PATH_BASE + ts + "_-_" + model.name 
    tensorboard = TensorBoard(log_dir=log_path,
                                write_graph=False, #This eats a lot of space. Enable with caution!
                                #histogram_freq = 1,
                                write_images=True,
                                batch_size = model.batch_size,
                                write_grads=True)
    model_saver =  ModelCheckpoint(log_path + "/model.ckpt.{epoch:04d}.hdf5", monitor='loss', period=1)
    callbacks = [tensorboard, TerminateOnNaN(), model_saver]

    for i in testrange:
        acc = test_model(model, sequence_length=i, verbose=verbose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))

    train_model(model, epochs=epochs, callbacks=callbacks, verbose=verbose)

    for i in testrange:
        acc = test_model(model, sequence_length=i, verbose=verbose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))
    return
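A note on the log-directory naming above: giving each run its own timestamped subdirectory keeps runs separate, and pointing TensorBoard at the common parent (tensorboard --logdir <LOG_PATH_BASE>) overlays them for comparison.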
Project: Digit-Classifier    Author: ardamavi
def train_model(model, X, X_test, Y, Y_test):

    batch_size = 100
    epochs = 2

    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    # Data augmentation generates extra training samples on the fly,
    # which improves generalization at the cost of a longer training run.

    # To train without augmentation, use this instead:
    # model.fit(X, Y, batch_size=batch_size, epochs=epochs, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)

    generated_data = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=0,  width_shift_range=0.1, height_shift_range=0.1, horizontal_flip = True, vertical_flip = False)
    generated_data.fit(X)

    model.fit_generator(generated_data.flow(X, Y, batch_size=batch_size), steps_per_epoch=X.shape[0] // 6, epochs=epochs, validation_data=(X_test, Y_test), callbacks=checkpoints)

    return model
Project: Msc_Multi_label_ZeroShot    Author: thomasSve
def train_multilabel_bts(lang_db, imdb, pretrained, max_iters = 1000, loss_func = 'squared_hinge', box_method = 'random'):
    # Create callback_list.
    dir_path = osp.join('output', 'bts_ckpt', imdb.name)
    tensor_path = osp.join(dir_path, 'log_dir')
    if not osp.exists(dir_path):
        os.makedirs(dir_path)
    if not osp.exists(tensor_path):
        os.makedirs(tensor_path)

    ckpt_save = osp.join(dir_path, lang_db.name + '_multi_label_fixed_' + 'weights-{epoch:02d}.hdf5')
    checkpoint = ModelCheckpoint(ckpt_save, monitor='loss', verbose=1, save_best_only=True)
    early_stop = EarlyStopping(monitor='loss', min_delta=0, patience=3, verbose=0, mode='auto')
    tensorboard = TensorBoard(log_dir=dir_path, histogram_freq=2000, write_graph=True, write_images=False)
    callback_list = [checkpoint, early_stop, tensorboard]
    pretrained.fit_generator(load_multilabel_data(imdb, lang_db, pretrained, box_method),
                             steps_per_epoch = 5000,
                             epochs = max_iters,
                             verbose = 1,
                             callbacks = callback_list,
                             workers = 1)

    pretrained.save(osp.join(dir_path, 'model_fixed' + imdb.name + '_' + lang_db.name + '_ML_' + box_method + '_' + loss_func + '.hdf5'))
Project: keras_detect_tool_wear    Author: kidozh
def train():
    model = build_main_residual_network(BATCH_SIZE,MAX_TIME_STEP,INPUT_DIM,OUTPUT_DIM,loop_depth=DEPTH)

    # deal with x, y (data preparation elided in the original source)
    # x_train = x
    model.fit(x_train, y_train, validation_split=0.1, epochs=50, callbacks=[TensorBoard(log_dir='./residual_cnn_dir_deep_%s_all' % (DEPTH))])

    import random

    randomIndex = random.randint(0, SAMPLE_NUM - 1)  # randint is inclusive on both ends

    print('Selecting %s as the sample' % (randomIndex))

    pred = model.predict(x_train[randomIndex:randomIndex + 1])

    print(pred)

    print(y_train[randomIndex])

    model.save(MODEL_PATH)
Project: MSgothicPolice    Author: ysdyt
def _build_callbacks(self):
        """Build callback objects.

        Returns:
            A list containing the following callback objects:
                - TensorBoard
                - ModelCheckpoint
        """

        tensorboard_path = os.path.join(self.checkpoints_path, 'tensorboard')
        tensorboard = TensorBoard(log_dir=tensorboard_path)

        checkpoint_path = os.path.join(self.checkpoints_path, self.checkpoint_file_format)
        checkpointer = ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_best_only=self.save_best_only)

        return [tensorboard, checkpointer]
Project: deep-mlsa    Author: spinningbytes
def get_callbacks(config_data, appendix=''):
    ret_callbacks = []
    model_stored = False
    callbacks = config_data['callbacks']
    if K._BACKEND == 'tensorflow':
        tensor_board = TensorBoard(log_dir=os.path.join('logging', config_data['tb_log_dir']), histogram_freq=10)
        ret_callbacks.append(tensor_board)
    for callback in callbacks:
        if callback['name'] == 'early_stopping':
            ret_callbacks.append(EarlyStopping(monitor=callback['monitor'], patience=callback['patience'], verbose=callback['verbose'], mode=callback['mode']))
        elif callback['name'] == 'model_checkpoit':
            model_stored = True
            path = config_data['output_path']
            basename = config_data['output_basename']
            base_path = os.path.join(path, basename)
            opath = os.path.join(base_path, 'best_model{}.h5'.format(appendix))
            save_best = bool(callback['save_best_only'])
            ret_callbacks.append(ModelCheckpoint(filepath=opath, verbose=callback['verbose'], save_best_only=save_best, monitor=callback['monitor'], mode=callback['mode']))
    return ret_callbacks, model_stored
Project: DL_for_xss    Author: SparkSharly
def train(train_generator,train_size,input_num,dims_num):
    print("Start Train Job! ")
    start=time.time()
    inputs=InputLayer(input_shape=(input_num,dims_num),batch_size=batch_size)
    layer1=Dense(100,activation="relu")
    layer2=Dense(20,activation="relu")
    flatten=Flatten()
    layer3=Dense(2,activation="softmax",name="Output")
    optimizer=Adam()
    model=Sequential()
    model.add(inputs)
    model.add(layer1)
    model.add(Dropout(0.5))
    model.add(layer2)
    model.add(Dropout(0.5))
    model.add(flatten)
    model.add(layer3)
    call=TensorBoard(log_dir=log_dir,write_grads=True,histogram_freq=1)
    model.compile(optimizer,loss="categorical_crossentropy",metrics=["accuracy"])
    model.fit_generator(train_generator,steps_per_epoch=train_size//batch_size,epochs=epochs_num,callbacks=[call])
#    model.fit_generator(train_generator, steps_per_epoch=5, epochs=5, callbacks=[call])
    model.save(model_dir)
    end=time.time()
    print("Over train job in %f s"%(end-start))
Project: DL_for_xss    Author: SparkSharly
def train(train_generator,train_size,input_num,dims_num):
    print("Start Train Job! ")
    start=time.time()
    inputs=InputLayer(input_shape=(input_num,dims_num),batch_size=batch_size)
    layer1=LSTM(128)
    output=Dense(2,activation="softmax",name="Output")
    optimizer=Adam()
    model=Sequential()
    model.add(inputs)
    model.add(layer1)
    model.add(Dropout(0.5))
    model.add(output)
    call=TensorBoard(log_dir=log_dir,write_grads=True,histogram_freq=1)
    model.compile(optimizer,loss="categorical_crossentropy",metrics=["accuracy"])
    model.fit_generator(train_generator,steps_per_epoch=train_size//batch_size,epochs=epochs_num,callbacks=[call])
#    model.fit_generator(train_generator, steps_per_epoch=5, epochs=5, callbacks=[call])
    model.save(model_dir)
    end=time.time()
    print("Over train job in %f s"%(end-start))
Project: VariationalAutoEncoder    Author: despoisj
def trainModel():
    # Create models
    print("Creating VAE...")
    vae, _, _ = getModels()
    vae.compile(optimizer='rmsprop', loss=VAELoss)

    print("Loading dataset...")
    X_train, X_test = loadDataset()

    # Train the VAE on dataset
    print("Training VAE...")
    runID = "VAE - ZZZ"
    tb = TensorBoard(log_dir='/tmp/logs/'+runID)
    vae.fit(X_train, X_train, shuffle=True, nb_epoch=nbEpoch, batch_size=batchSize, validation_data=(X_test, X_test), callbacks=[tb])

    # Serialize weights to HDF5
    print("Saving weights...")
    vae.save_weights(modelsPath+"model.h5")

# Generates images and plots
Project: 2017_iv_deep_radar    Author: tawheeler
def train_discriminator(nsteps):
        mean_loss = 0.0
        for i in range(nsteps):  # iterate exactly nsteps times so the mean below divides correctly
            # pick real samples
            batch_indeces = np.random.randint(0,O_train.shape[0],args.batch_size)
            y_real = Y_train[batch_indeces,:,:,:]

            # pick fake samples
            batch_indeces = np.random.randint(0,O_train.shape[0],args.batch_size)
            o_in = O_train[batch_indeces,:,:,:]
            t_in = T_train[batch_indeces,:,:,:]
            y_in = Y_train[batch_indeces,:,:,:]
            y_fake = generator.predict([o_in, t_in, y_in])[0]

            # train
            y_disc = np.vstack([y_real, y_fake])
            r = adversary.fit(y_disc, d_disc,
                              #callbacks=[TensorBoard(log_dir=args.tblog + '_D', write_graph=False)],
                              verbose=0)
            loss = r.history['loss'][0]
            mean_loss = mean_loss + loss
        return mean_loss / nsteps
Project: DeepSpell_temp    Author: surmenok
def iterate_training(model, dataset, initial_epoch):
    """Iterative Training"""

    checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' + MODEL_CHECKPOINT_FILENAME,
                                 save_best_only=True)
    tensorboard = TensorBoard()
    csv_logger = CSVLogger(CSV_LOG_FILENAME)

    X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))
    show_samples_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))

    train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)
    validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)

    model.fit_generator(train_batch_generator,
                        samples_per_epoch=SAMPLES_PER_EPOCH,
                        nb_epoch=NUMBER_OF_EPOCHS,
                        validation_data=validation_batch_generator,
                        nb_val_samples=SAMPLES_PER_EPOCH,
                        callbacks=[checkpoint, tensorboard, csv_logger, show_samples_callback],
                        verbose=1,
                        initial_epoch=initial_epoch)
Project: Cat-Segmentation    Author: ardamavi
def train_model(model, X, X_test, Y, Y_test):
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints = []
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    model.fit(X, Y, batch_size=batch_size, epochs=epochs, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)

    return model
Project: speechless    Author: JuliusKunze
def create_callbacks(self, callback: Callable[[], None], tensor_board_log_directory: Path, net_directory: Path,
                         callback_step: int = 1, save_step: int = 1) -> List[Callback]:
        class CustomCallback(Callback):
            def on_epoch_end(self_callback, epoch, logs=()):
                if epoch % callback_step == 0:
                    callback()

                if epoch % save_step == 0 and epoch > 0:
                    mkdir(net_directory)

                    self.predictive_net.save_weights(str(net_directory / self.model_file_name(epoch)))

        tensorboard_if_running_tensorflow = [TensorBoard(
            log_dir=str(tensor_board_log_directory), write_images=True)] if backend.backend() == 'tensorflow' else []
        return tensorboard_if_running_tensorflow + [CustomCallback()]
Project: DeepTrade_keras    Author: happynoom
def make_model(input_shape, nb_epochs=100, batch_size=128, lr=0.01, n_layers=1, n_hidden=16, rate_dropout=0.3):
    model_path = 'model.%s' % input_shape[0]
    wp = WindPuller(input_shape=input_shape, lr=lr, n_layers=n_layers, n_hidden=n_hidden, rate_dropout=rate_dropout)
    train_set, test_set = read_ultimate("./", input_shape)
    wp.fit(train_set.images, train_set.labels, batch_size=batch_size,
           nb_epoch=nb_epochs, shuffle=True, verbose=1,
           validation_data=(test_set.images, test_set.labels),
           callbacks=[TensorBoard(histogram_freq=1),
                      ModelCheckpoint(filepath=model_path+'.best', save_best_only=True, mode='min')])
    scores = wp.evaluate(test_set.images, test_set.labels, verbose=0)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    wp.model.save(model_path)
    saved_wp = wp.load_model(model_path)
    scores = saved_wp.evaluate(test_set.images, test_set.labels, verbose=0)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])
    pred = saved_wp.predict(test_set.images, 1024)
    # print(pred)
    # print(test_set.labels)
    pred = numpy.reshape(pred, [-1])
    result = numpy.array([pred, test_set.labels]).transpose()
    with open('output.' + str(input_shape[0]), 'w') as fp:
        for i in range(result.shape[0]):
            for val in result[i]:
                fp.write(str(val) + "\t")
            fp.write('\n')
Project: keras    Author: GeekLiB
def test_TensorBoard_with_ReduceLROnPlateau():
    import shutil
    filepath = './logs'
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)

    assert os.path.exists(filepath)
    shutil.rmtree(filepath)
Project: midi-rnn    Author: brannondorsey
def get_callbacks(experiment_dir, checkpoint_monitor='val_acc'):

    callbacks = []

    # save model checkpoints
    filepath = os.path.join(experiment_dir, 
                            'checkpoints', 
                            'checkpoint-epoch_{epoch:03d}-val_acc_{val_acc:.3f}.hdf5')

    callbacks.append(ModelCheckpoint(filepath, 
                                     monitor=checkpoint_monitor, 
                                     verbose=1, 
                                     save_best_only=False, 
                                     mode='max'))

    callbacks.append(ReduceLROnPlateau(monitor='val_loss', 
                                       factor=0.5, 
                                       patience=3, 
                                       verbose=1, 
                                       mode='auto', 
                                       epsilon=0.0001, 
                                       cooldown=0, 
                                       min_lr=0))

    callbacks.append(TensorBoard(log_dir=os.path.join(experiment_dir, 'tensorboard-logs'), 
                                histogram_freq=0, 
                                write_graph=True, 
                                write_images=False))

    return callbacks
Project: Controller-Hand    Author: ardamavi
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')

    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    model.fit(X, Y, batch_size=batch_size, epochs=epochs, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)

    return model
Project: keras_detect_tool_wear    Author: kidozh
def train():
    model = build_stateful_lstm_model(BATCH_SIZE, TIME_STEP, INPUT_DIM, OUTPUT_DIM, dropout=0.1)

    # model.fit(x_train,y_train,validation_data=(x_train[:10],y_train[:10]),epochs=5,callbacks=[TensorBoard()],batch_size=1)

    for index, y_dat in enumerate(y):
        print('Run test on %s' % (index))
        model.fit(np.array([x[index]]), y_dat.reshape(1, 3),
                  validation_data=(np.array([x[index]]), y_dat.reshape(1, 3)), epochs=10, callbacks=[TensorBoard()])
        model.save(MODEL_PATH)
        x_pred = model.predict(np.array([x[index]]))
        print(x_pred)
        print(y_dat)

    model.save(MODEL_PATH)
Project: keras_detect_tool_wear    Author: kidozh
def train():
    model = build_real_stateful_lstm_model_with_normalization(BATCH_SIZE, TIME_STEP, INPUT_DIM, OUTPUT_DIM)

    # deal with x, y (data preparation elided in the original source)
    # x_train = x
    model.fit(x_train[:SAMPLE_NUM//BATCH_SIZE*BATCH_SIZE],
              y_train[:SAMPLE_NUM//BATCH_SIZE*BATCH_SIZE],
              batch_size=BATCH_SIZE,
              validation_split=0,
              epochs=30, callbacks=[TensorBoard(log_dir='./stateful_lstm_fixed')])

    # for index,y_dat in enumerate(y):
    #     print('Run test on %s' %(index))
    #     # print(y_dat.reshape(3,1))
    #     model.fit(np.array([x[index]]),np.array([y_dat.reshape(1,3)]),validation_data=(np.array([x[index]]),np.array([y_dat.reshape(1,3)])),epochs=100,callbacks=[TensorBoard()])
    #     model.save(MODEL_PATH)
    #     x_pred = model.predict(np.array([x[index]]))
    #     print(x_pred,x_pred.shape)
    #     print(np.array([y_dat.reshape(1,3)]))

    import random

    randomIndex = random.randint(0, SAMPLE_NUM - 1)  # randint is inclusive on both ends

    print('Selecting %s as the sample' % (randomIndex))

    pred = model.predict(x_train[randomIndex:randomIndex + 1])

    print(pred)

    print(y_train[randomIndex])

    model.save(MODEL_PATH)
Project: keras_detect_tool_wear    Author: kidozh
def train():
    print('Done')
    model = build_2d_main_residual_network(BATCH_SIZE,MAX_TIME_STEP,INPUT_DIM,2,OUTPUT_DIM,loop_depth=DEPTH)
    # model = build_main_residual_network(BATCH_SIZE,MAX_TIME_STEP,INPUT_DIM,OUTPUT_DIM,loop_depth=DEPTH)

    # deal with x, y (data preparation elided in the original source)
    # x_train = x
    model.fit(x_train, y_train, validation_split=0.1, epochs=50, callbacks=[TensorBoard(log_dir='./residual_freq_cnn_dir_deep_%s_all'%(DEPTH))])

    import random

    randomIndex = random.randint(0, SAMPLE_NUM - 1)  # randint is inclusive on both ends

    print('Selecting %s as the sample' % (randomIndex))

    pred = model.predict(x_train[randomIndex:randomIndex + 1])

    print(pred)

    print(y_train[randomIndex])

    model.save(MODEL_PATH)
Project: keras_detect_tool_wear    Author: kidozh
def train():
    model = build_stateful_lstm_model_with_normalization(BATCH_SIZE, TIME_STEP, INPUT_DIM, OUTPUT_DIM, dropout=0.1)

    # model.fit(x_train,y_train,validation_data=(x_train[:10],y_train[:10]),epochs=5,callbacks=[TensorBoard()],batch_size=1)

    for index, y_dat in enumerate(y):
        print('Run test on %s' % (index))
        model.fit(np.array([x[index]]), y_dat.reshape(1, 3),
                  validation_data=(np.array([x[index]]), y_dat.reshape(1, 3)), epochs=10, callbacks=[TensorBoard()])
        model.save(MODEL_PATH)
        x_pred = model.predict(np.array([x[index]]))
        print(x_pred)
        print(y_dat)

    model.save(MODEL_PATH)
Project: Msc_Multi_label_ZeroShot    Author: thomasSve
def train_bts(lang_db, imdb, max_iters = 1000, loss = 'squared_hinge'):
    # Define network
    model = define_network(lang_db.vector_size, loss)

    #model = load_model(osp.join('output', 'bts_ckpt', 'imagenet1k_train_bts', 'glove_wiki_300_hinge_weights-03.hdf5'))

    # Create callback_list.
    dir_path = osp.join('output', 'bts_ckpt', imdb.name)
    if not osp.exists(dir_path):
        os.makedirs(dir_path)

    log_dir = osp.join('output', 'bts_logs', imdb.name)
    if not osp.exists(log_dir):
        os.makedirs(log_dir)

    ckpt_save = osp.join(dir_path, lang_db.name + "_" + loss + "_weights-{epoch:02d}.hdf5")
    checkpoint = ModelCheckpoint(ckpt_save, monitor='val_loss', verbose=1, save_best_only = True)
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')

    tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True, write_images=False)
    callback_list = [checkpoint, early_stop, tensorboard]
    model.fit_generator(load_data(imdb, lang_db),
                        steps_per_epoch = 5000,
                        epochs = max_iters,
                        verbose = 1,
                        validation_data = imdb.load_val_data(lang_db),
                        validation_steps = 20000, # number of images to validate on
                        callbacks = callback_list,
                        workers = 1)

    model.save(osp.join(dir_path, 'model_'  + imdb.name + '_' + lang_db.name + '_' + loss + '_l2.hdf5'))
Project: keras-customized    Author: ambrite
def test_TensorBoard_with_ReduceLROnPlateau():
    import shutil
    filepath = './logs'
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)

    assert os.path.exists(filepath)
    shutil.rmtree(filepath)
Project: dsde-deep-learning    Author: broadinstitute
def conv_autoencode_mnist():
    (x_train, y_train), (x_test, y_test) = load_mnist(flatten=False)
    autoencoder = build_conv_autoencoder()
    autoencoder.summary()
    autoencoder.fit(x_train, x_train,
        epochs=55,
        batch_size=128,
        shuffle=True,
        validation_data=(x_test, x_test),
        callbacks=[TensorBoard(log_dir='./tmp/autoencoder')])   

    decoded_imgs = autoencoder.predict(x_test)
    plot_imgs_and_reconstructions(x_test, decoded_imgs, n=10)
Project: dsde-deep-learning    Author: broadinstitute
def conv_autoencode_cifar():
    (x_train, y_train), (x_test, y_test) = load_cifar(flatten=False)
    autoencoder = build_conv_autoencoder(input_dim=(32,32,3))
    autoencoder.summary()

    autoencoder.fit(x_train, x_train,
        epochs=25,
        batch_size=64,
        shuffle=True,
        validation_data=(x_test, x_test),
        callbacks=[TensorBoard(log_dir='./tmp/autoencoder')])   

    decoded_imgs = autoencoder.predict(x_test)
    plot_imgs_and_reconstructions(x_test, decoded_imgs, n=10, shape=(32,32,3))
Project: Jetson-RaceCar-AI    Author: ardamavi
def train_model(model, X_1, X_2, Y):

    batch_size = 1
    epochs = 10

    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    model.fit([X_1, X_2], Y, batch_size=batch_size, epochs=epochs, validation_data=([X_1, X_2], Y), shuffle=True, callbacks=checkpoints)

    return model
Project: lang2program    Author: kelvinguu
def train(self, train_batches, valid_batches, samples_per_epoch, nb_epoch, nb_val_samples, extra_callbacks=None):
        """Train the model.

        Automatically adds the following Keras callbacks:
            - ModelCheckpoint
            - EarlyStopping
            - TensorBoard

        Args:
            train_batches (Iterable[Batch]): an iterable of training Batches
            valid_batches (Iterable[Batch]): an iterable of validation Batches
            samples_per_epoch (int)
            nb_epoch (int): max number of epochs to train for
            nb_val_samples (int): number of samples for validation
            extra_callbacks (list): a list of additional Keras callbacks to run
        """
        checkpoint_path = join(self.checkpoint_dir, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5')
        checkpointer = ModelCheckpoint(checkpoint_path, verbose=1, save_best_only=False)
        early_stopper = EarlyStopping(monitor='val_loss', patience=2, verbose=1)
        tboard = TensorBoard(self.tensorboard_dir, write_graph=False)

        callbacks = [checkpointer, early_stopper, tboard]
        if extra_callbacks:
            callbacks.extend(extra_callbacks)

        train = self._vectorized_batches(train_batches)
        valid = self._vectorized_batches(valid_batches)

        self.keras_model.fit_generator(train, samples_per_epoch, nb_epoch,
                                       callbacks=callbacks,
                                       validation_data=valid, nb_val_samples=nb_val_samples
                                       )
Project: keras-autoencoder    Author: Rentier
def train(self, x_train, x_test, epochs, batch_size, log_dir='/tmp/autoencoder', stop_early=True):
        callbacks = []
        if backend._BACKEND == 'tensorflow':
            callbacks.append(TensorBoard(log_dir=log_dir))

        if stop_early:
            callbacks.append(EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='auto'))

        self.autoencoder.fit(x_train, x_train,
                nb_epoch=epochs,
                batch_size=batch_size,
                shuffle=True,
                validation_data=(x_test, x_test),
                callbacks=callbacks)
Project: devise-keras    Author: priyamtejaswin
def main():
    RUN_TIME = sys.argv[1]


    if RUN_TIME == "TRAIN":
        image_features = Input(shape=(4096,))
        model = build_model(image_features)
        model.summary()  # summary() prints the architecture itself

        # number of training images 
        _num_train = get_num_train_images()

        # Callbacks 
        # remote_cb = RemoteMonitor(root='http://localhost:9000')
        tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
        epoch_cb    = EpochCheckpoint(folder="./snapshots/")
        valid_cb    = ValidCallBack()

        # fit generator
        steps_per_epoch = math.ceil(_num_train/float(BATCH))
        print "Steps per epoch i.e number of iterations: ",steps_per_epoch

        train_datagen = data_generator(batch_size=INCORRECT_BATCH, image_class_ranges=TRAINING_CLASS_RANGES)
        history = model.fit_generator(
                train_datagen,
                steps_per_epoch=steps_per_epoch,
                epochs=250,
                callbacks=[tensorboard, valid_cb]
            )
        print(history.history.keys())


    elif RUN_TIME == "TEST":
        from keras.models import load_model 
        model = load_model("snapshots/epoch_49.hdf5", custom_objects={"hinge_rank_loss":hinge_rank_loss})

    K.clear_session()
Project: pythontest    Author: gjq246
def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))  # record every batch's loss in self.losses

# The stock TensorBoard callback only logs once per epoch; a custom callback like this captures per-batch losses.
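The method above is only a fragment; for context, a minimal self-contained sketch of the enclosing Callback subclass (the class name LossHistory is an assumption, not taken from the quoted project) could look like this:

from keras.callbacks import Callback

class LossHistory(Callback):
    """Collects the training loss after every batch."""

    def on_train_begin(self, logs=None):
        self.losses = []

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        self.losses.append(logs.get('loss'))  # finer-grained than TensorBoard's per-epoch curves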
Project: anago    Author: Hironsan
def get_callbacks(log_dir=None, valid=(), tensorboard=True, early_stopping=True):
    """Get callbacks.

    Args:
        log_dir (str): the destination to save logs (for TensorBoard).
        valid (tuple): data for validation.
        tensorboard (bool): Whether to use tensorboard.
        early_stopping (bool): whether to use early stopping.

    Returns:
        list: list of callbacks
    """
    callbacks = []

    if log_dir and tensorboard:
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
            print('Successfully made a directory: {}'.format(log_dir))
        callbacks.append(TensorBoard(log_dir))

    if valid:
        callbacks.append(F1score(*valid))

    if log_dir:
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
            print('Successfully made a directory: {}'.format(log_dir))

        file_name = '_'.join(['model_weights', '{epoch:02d}', '{f1:2.2f}']) + '.h5'
        save_callback = ModelCheckpoint(os.path.join(log_dir, file_name),
                                        monitor='f1',
                                        save_weights_only=True)
        callbacks.append(save_callback)

    if early_stopping:
        callbacks.append(EarlyStopping(monitor='f1', patience=3, mode='max'))

    return callbacks
Project: DL_for_xss    Author: SparkSharly
def train(train_generator,train_size,input_num,dims_num):
    print("Start Train Job! ")
    start=time.time()
    inputs=InputLayer(input_shape=(input_num,dims_num),batch_size=batch_size)
    layer1=Conv1D(64,3,activation="relu")
    layer2=Conv1D(64,3,activation="relu")
    layer3=Conv1D(128,3,activation="relu")
    layer4=Conv1D(128,3,activation="relu")
    layer5=Dense(128,activation="relu")
    output=Dense(2,activation="softmax",name="Output")
    optimizer=Adam()
    model=Sequential()
    model.add(inputs)
    model.add(layer1)
    model.add(layer2)
    model.add(MaxPool1D(pool_size=2))
    model.add(Dropout(0.5))
    model.add(layer3)
    model.add(layer4)
    model.add(MaxPool1D(pool_size=2))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(layer5)
    model.add(Dropout(0.5))
    model.add(output)
    call=TensorBoard(log_dir=log_dir,write_grads=True,histogram_freq=1)
    model.compile(optimizer,loss="categorical_crossentropy",metrics=["accuracy"])
    model.fit_generator(train_generator,steps_per_epoch=train_size//batch_size,epochs=epochs_num,callbacks=[call])
#    model.fit_generator(train_generator, steps_per_epoch=5, epochs=5, callbacks=[call])
    model.save(model_dir)
    end=time.time()
    print("Over train job in %f s"%(end-start))
Project: keras    Author: NVIDIA
def test_TensorBoard_with_ReduceLROnPlateau():
    import shutil
    filepath = './logs'
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)

    assert os.path.exists(filepath)
    shutil.rmtree(filepath)
Project: 2017_iv_deep_radar    Author: tawheeler
def train_generator(nsteps):
        mean_loss = 0.0
        for i in range(nsteps):  # iterate exactly nsteps times so the mean below divides correctly
            batch_indeces = np.random.randint(0,O_train.shape[0],args.batch_size)
            o_in = O_train[batch_indeces,:,:,:]
            t_in = T_train[batch_indeces,:,:,:]
            y_in = Y_train[batch_indeces,:,:,:]
            r = generator.fit([o_in,t_in,y_in], [y_in, d_comb],
                             #callbacks=[TensorBoard(log_dir=args.tblog + '_G', write_graph=False)],
                             verbose=0)
            loss = r.history['loss'][0]
            mean_loss = mean_loss + loss
        return mean_loss / nsteps
Project: unblackboxing_webinar    Author: deepsense-ai
def TensorBoardCallback(batch_size):
    return TensorBoard(log_dir=TENSORBOARD_LOGDIR,
                       histogram_freq=2,
                       batch_size=batch_size,
                       write_graph=True,
                       write_grads=False,
                       write_images=False,
                       embeddings_freq=0,
                       embeddings_layer_names=None,
                       embeddings_metadata=None)
Project: WaterNet    Author: treigerm
def train_model(model,
                features,
                labels,
                tile_size,
                model_id,
                nb_epoch=10,
                checkpoints=False,
                tensorboard=False):
    """Train a model with the given features and labels."""

    # The features and labels are a list of triples when passed
    # to the function. Each triple contains the tile and information
    # about its source image and its position in the source. To train
    # the model we extract just the tiles.
    X, y = get_matrix_form(features, labels, tile_size)

    X = normalise_input(X)

    # Directory which is used to store the model and its weights.
    model_dir = os.path.join(MODELS_DIR, model_id)

    checkpointer = None
    if checkpoints:
        checkpoints_file = os.path.join(model_dir, "weights.hdf5")
        checkpointer = ModelCheckpoint(checkpoints_file)

    tensorboarder = None
    if tensorboard:
        log_dir = os.path.join(TENSORBOARD_DIR, model_id)
        tensorboarder = TensorBoard(log_dir=log_dir)

    callbacks = [c for c in [checkpointer, tensorboarder] if c]

    print("Start training.")
    model.fit(X, y, nb_epoch=nb_epoch, callbacks=callbacks, validation_split=0.1)

    save_model(model, model_dir)
    return model
Project: deep-learning-experiments    Author: raghakot
def train(name, model, callbacks=None, batch_size=32, nb_epoch=200):
    """Common cifar10 training code.
    """
    callbacks = callbacks or []
    tb = TensorBoard(log_dir='./logs/{}'.format(name))
    model_checkpoint = ModelCheckpoint('./weights/{}.hdf5'.format(name), monitor='val_loss', save_best_only=True)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-7)
    callbacks.extend([reduce_lr, tb, model_checkpoint])

    print("Training {}".format(name))

    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(X_train)

    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        samples_per_epoch=X_train.shape[0],
                        nb_epoch=nb_epoch, verbose=2, max_q_size=1000,
                        callbacks=callbacks, validation_data=(X_test, Y_test))
Project: tying-wv-and-wc    Author: icoxfog417
def _get_callbacks(self):
        callbacks = [self.model.optimizer.get_lr_scheduler()]
        folder_name = self.get_name()
        if self.checkpoint_path:
            self_path = os.path.join(self.checkpoint_path, folder_name)
            if not os.path.exists(self.checkpoint_path):
                print("Make folder to save checkpoint file to {}".format(self.checkpoint_path))
                os.mkdir(self.checkpoint_path)
            if not os.path.exists(self_path):
                os.mkdir(self_path)

            file_name = "_".join(["model_weights", "{epoch:02d}", "{val_acc:.2f}"]) + ".h5"
            save_callback = ModelCheckpoint(os.path.join(self_path, file_name), save_weights_only=True)
            callbacks += [save_callback]

            if self.tensor_board:
                board_path = os.path.join(self.checkpoint_path, "tensor_board")
                self_board_path = os.path.join(board_path, folder_name)
                if not os.path.exists(board_path):
                    print("Make folder to visualize on TensorBoard to {}".format(board_path))
                    os.mkdir(board_path)
                if not os.path.exists(self_board_path):
                    os.mkdir(self_board_path)
                callbacks += [TensorBoard(self_board_path)]
                print("invoke tensorboard at {}".format(board_path))

        return callbacks
Project: nesgym    Author: codescv
def __init__(self,
                 image_shape,
                 num_actions,
                 frame_history_len=4,
                 replay_buffer_size=1000000,
                 training_freq=4,
                 training_starts=5000,
                 training_batch_size=32,
                 target_update_freq=1000,
                 reward_decay=0.99,
                 exploration=LinearSchedule(5000, 0.1),
                 log_dir="logs/"):
        """
            Double Deep Q Network
            params:
            image_shape: (height, width, n_values)
            num_actions: how many different actions we can choose
            frame_history_len: number of consecutive past frames fed as input to the deep Q-network
            replay_buffer_size: size limit of replay buffer
            training_freq: train base q network once per training_freq steps
            training_starts: only train q network after this number of steps
            training_batch_size: batch size for training base q network with gradient descent
            reward_decay: decay factor (called gamma in the paper) for rewards that happen in the future
            exploration: used to generate an exploration factor (see 'epsilon-greedy' in the paper).
                         when rand(0,1) < epsilon, take a random action; otherwise take the greedy action.
            log_dir: path to write tensorboard logs
        """
        super().__init__()
        self.num_actions = num_actions
        self.training_freq = training_freq
        self.training_starts = training_starts
        self.training_batch_size = training_batch_size
        self.target_update_freq = target_update_freq
        self.reward_decay = reward_decay
        self.exploration = exploration

        # use multiple frames as input to q network
        input_shape = image_shape[:-1] + (image_shape[-1] * frame_history_len,)
        # used to choose action
        self.base_model = q_model(input_shape, num_actions)
        self.base_model.compile(optimizer=optimizers.adam(clipnorm=10, lr=1e-4, decay=1e-6, epsilon=1e-4), loss='mse')
        # used to estimate q values
        self.target_model = q_model(input_shape, num_actions)

        self.replay_buffer = ReplayBuffer(size=replay_buffer_size, frame_history_len=frame_history_len)
        # current replay buffer offset
        self.replay_buffer_idx = 0

        self.tensorboard_callback = TensorBoard(log_dir=log_dir)
        self.latest_losses = deque(maxlen=100)
Project: Image-Caption-Generator    Author: abi-aryan
def train(batch_size=128,
          epochs=100,
          data_dir="/home/shagun/projects/Image-Caption-Generator/data/",
          weights_path=None,
          mode="train"):
    '''Method to train the image caption generator
    weights_path is the path to the .h5 file where weights from the previous
    run are saved (if available)'''

    config_dict = generate_config(data_dir=data_dir,
                                  mode=mode)
    config_dict['batch_size'] = batch_size
    steps_per_epoch = config_dict["total_number_of_examples"] // batch_size

    print("steps_per_epoch = ", steps_per_epoch)
    train_data_generator = debug_generator(config_dict=config_dict,
                                           data_dir=data_dir)

    model = create_model(config_dict=config_dict)

    if weights_path:
        model.load_weights(weights_path)

    file_name = data_dir + "model/weights-{epoch:02d}.hdf5"
    checkpoint = ModelCheckpoint(filepath=file_name,
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    tensorboard = TensorBoard(log_dir='../logs',
                              histogram_freq=0,
                              batch_size=batch_size,
                              write_graph=True,
                              write_grads=True,
                              write_images=False,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)

    callbacks_list = [checkpoint, tensorboard]
    model.fit_generator(
        generator=train_data_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        verbose=2,
        callbacks=callbacks_list)
Project: keras_detect_tool_wear    Author: kidozh
def show_result():
    from keras.models import load_model
    model = load_model(MODEL_PATH)
    # model.fit(x_train,y_train,validation_data=(x_train[:10],y_train[:10]),epochs=5,callbacks=[TensorBoard()],batch_size=1)

    SAMPLE_NUM = 315

    a = np.zeros(SAMPLE_NUM)
    b = np.zeros(SAMPLE_NUM)
    c = np.zeros(SAMPLE_NUM)

    real_a = np.zeros(SAMPLE_NUM)
    real_b = np.zeros(SAMPLE_NUM)
    real_c = np.zeros(SAMPLE_NUM)

    for index, y_dat in enumerate(y):
        print('Run prediction on %s' % (index))
        # model.fit(np.array([x[index]]), y_dat.reshape(1, 3),
        #           validation_data=(np.array([x[index]]), y_dat.reshape(1, 3)), epochs=10, callbacks=[TensorBoard()])
        x_pred = model.predict(np.array([x[index]]))
        print(x_pred,y_dat)
        print(x_pred.shape,y_dat.shape)
        real_a[index] = y_dat.reshape(1,3)[0][0]
        real_b[index] = y_dat.reshape(1,3)[0][1]
        real_c[index] = y_dat.reshape(1,3)[0][2]

        a[index] = x_pred[0][0]
        b[index] = x_pred[0][1]
        c[index] = x_pred[0][2]

    import matplotlib.pyplot as plt

    plt.plot(np.arange(SAMPLE_NUM), a, label='a')
    plt.plot(np.arange(SAMPLE_NUM), real_a, label='real_a')
    plt.title('A')
    plt.legend()
    plt.show()

    plt.plot(np.arange(SAMPLE_NUM), b, label='b')
    plt.plot(np.arange(SAMPLE_NUM), real_b, label='real_b')
    plt.title('B')
    plt.legend()
    plt.show()

    plt.plot(np.arange(SAMPLE_NUM), c, label='c')
    plt.plot(np.arange(SAMPLE_NUM), real_c, label='real_c')
    plt.title('C')
    plt.legend()
    plt.show()
Project: keras_detect_tool_wear    Author: kidozh
def train():
    model = build_multi_1d_cnn_model(BATCH_SIZE,
                                     TIME_STEP,
                                     INPUT_DIM,
                                     OUTPUT_DIM,
                                     dropout=0.4,
                                     kernel_size=3,
                                     pooling_size=2,
                                     conv_dim=(128, 64, 32),
                                     stack_loop_num=2)

    # deal with x, y (data preparation elided in the original source)
    # x_train = x
    model.fit(x_train, y_train, validation_split=0, epochs=50, callbacks=[TensorBoard(log_dir='./cnn_dir')], batch_size=10)

    # for index,y_dat in enumerate(y):
    #     print('Run test on %s' %(index))
    #     # print(y_dat.reshape(3,1))
    #     model.fit(np.array([x[index]]),np.array([y_dat.reshape(1,3)]),validation_data=(np.array([x[index]]),np.array([y_dat.reshape(1,3)])),epochs=100,callbacks=[TensorBoard()])
    #     model.save(MODEL_PATH)
    #     x_pred = model.predict(np.array([x[index]]))
    #     print(x_pred,x_pred.shape)
    #     print(np.array([y_dat.reshape(1,3)]))

    import random

    randomIndex = random.randint(0, SAMPLE_NUM - 1)  # randint is inclusive on both ends

    print('Selecting %s as the sample' % (randomIndex))

    pred = model.predict(x_train[randomIndex:randomIndex + 1])

    print(pred)

    print(y_train[randomIndex])

    model.save(MODEL_PATH)
Project: MixtureOfExperts    Author: krishnakalyan3
def train_model(self, X_train, y_train, X_test, y_test, X_val, y_val):

        for i in range(self.iters):
            split_buckets = self.bucket_function(i)
            experts_out_train = np.empty((self.train_dim[0], self.experts), dtype='float64')
            experts_out_test = np.empty((self.test_dim[0], self.experts), dtype='float64')
            experts_out_val = np.empty((self.val_dim[0], self.experts), dtype='float64')

            j = 0
            for expert_index in sorted(split_buckets):
                print("############################# Expert {} Iter {} ################################".format(j, i))
                X = X_train[split_buckets[expert_index]]
                y = y_train[split_buckets[expert_index]]
                model = self.svc_model(X, y, X_test, y_test, X_val, y_val, i, j)

                experts_out_train[:, expert_index] = model.predict(X_train)
                experts_out_test[:, expert_index] = model.predict(X_test)
                experts_out_val[:, expert_index] = model.predict(X_val)

                j += 1

            gater_model = self.gater()
            early_callback = CustomCallback()
            tb_callback = TensorBoard(log_dir=self.tf_log + str(i))
            history = gater_model.fit([X_train, experts_out_train], y_train, shuffle=True,
                                      batch_size=self.batch_size, verbose=1, validation_data=([X_val, experts_out_val], y_val),
                                      epochs=1000, callbacks=[tb_callback, early_callback])

            train_accuracy = self.moe_eval(gater_model, X_train, y_train, experts_out_train)
            test_accuracy = self.moe_eval(gater_model, X_test, y_test, experts_out_test)
            val_accuracy = self.moe_eval(gater_model, X_val, y_val, experts_out_val)

            print('Train Accuracy', train_accuracy)
            print('Test Accuracy', test_accuracy)
            print('Val Accuracy', val_accuracy)

            tre = 100 - train_accuracy
            tte = 100 - test_accuracy
            vale = 100 - val_accuracy
            expert_units = Model(inputs=gater_model.input,
                                outputs=gater_model.get_layer('layer_op_2').output)

            self.wm_xi = expert_units.predict([X_train, experts_out_train])

            logging.info('{}, {}, {}, {}'.format(i, tre, vale, tte))

        return None
Project: pixelcnn_keras    Author: suga93
def __init__(
        self,
        input_size,
        nb_channels=3,
        conditional=False,
        latent_dim=10,
        nb_pixelcnn_layers=13,
        nb_filters=128,
        filter_size_1st=(7,7),
        filter_size=(3,3),
        optimizer='adadelta',
        es_patience=100,
        save_root='/tmp/pixelcnn',
        save_best_only=False,
        **kwargs):
        '''
        Args:
            input_size ((int,int))      : (height, width) pixels of input images
            nb_channels (int)           : Number of channels for input images. (1 for grayscale images, 3 for color images)
            conditional (bool)          : if True, use latent vector to model the conditional distribution p(x|h) (default:False)
            latent_dim (int)            : (if conditional==True,) Dimensions for latent vector.
            nb_pixelcnn_layers (int)    : Number of layers (except last two ReLu layers). (default:13)
            nb_filters (int)            : Number of filters (feature maps) for each layer. (default:128)
            filter_size_1st ((int, int)): Kernel size for the first layer. (default: (7,7))
            filter_size ((int, int))    : Kernel size for the subsequent layers. (default: (3,3))
            optimizer (str)             : SGD optimizer (default: 'adadelta')
            es_patience (int)           : Number of epochs with no improvement after which training will be stopped (EarlyStopping)
            save_root (str)             : Root directory to which {trained model file, parameter.txt, tensorboard log file} are saved
            save_best_only (bool)       : if True, the latest best model will not be overwritten (default: False)
        '''
        K.set_image_dim_ordering('tf')

        self.input_size = input_size
        self.conditional = conditional
        self.latent_dim = latent_dim
        self.nb_pixelcnn_layers = nb_pixelcnn_layers
        self.nb_filters = nb_filters
        self.filter_size_1st = filter_size_1st
        self.filter_size = filter_size
        self.nb_channels = nb_channels
        if self.nb_channels == 1:
            self.loss = 'binary_crossentropy'
        elif self.nb_channels == 3:
            self.loss = 'categorical_crossentropy'
        self.optimizer = optimizer
        self.es_patience = es_patience
        self.save_best_only = save_best_only

        tensorboard_dir = os.path.join(save_root, 'pixelcnn-tensorboard')
        checkpoint_path = os.path.join(save_root, 'pixelcnn-weights.{epoch:02d}-{val_loss:.4f}.hdf5')
        self.tensorboard = TensorBoard(log_dir=tensorboard_dir)
        ### "save_weights_only=False" causes error when exporting model architecture. (json or yaml)
        self.checkpointer = ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_weights_only=True, save_best_only=save_best_only)
        self.earlystopping = EarlyStopping(monitor='val_loss', patience=es_patience, verbose=0, mode='auto')
Project: Image-Caption-Generator    Author: shagunsodhani
def train(batch_size=128,
          epochs=100,
          data_dir="/home/shagun/projects/Image-Caption-Generator/data/",
          weights_path=None,
          mode="train"):
    '''Method to train the image caption generator
    weights_path is the path to the .h5 file where weights from the previous
    run are saved (if available)'''

    config_dict = generate_config(data_dir=data_dir,
                                  mode=mode)
    config_dict['batch_size'] = batch_size
    steps_per_epoch = config_dict["total_number_of_examples"] // batch_size

    print("steps_per_epoch = ", steps_per_epoch)
    train_data_generator = debug_generator(config_dict=config_dict,
                                           data_dir=data_dir)

    model = create_model(config_dict=config_dict)

    if weights_path:
        model.load_weights(weights_path)

    file_name = data_dir + "model/weights-{epoch:02d}.hdf5"
    checkpoint = ModelCheckpoint(filepath=file_name,
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    tensorboard = TensorBoard(log_dir='../logs',
                              histogram_freq=0,
                              batch_size=batch_size,
                              write_graph=True,
                              write_grads=True,
                              write_images=False,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)

    callbacks_list = [checkpoint, tensorboard]
    model.fit_generator(
        generator=train_data_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        verbose=2,
        callbacks=callbacks_list)
Project: YAD2K    Author: allanzelener
def train(model, class_names, anchors, image_data, boxes, detectors_mask, matching_true_boxes, validation_split=0.1):
    '''
    retrain/fine-tune the model

    logs training with tensorboard

    saves training weights in current directory

    best weights according to val_loss is saved as trained_stage_3_best.h5
    '''
    model.compile(
        optimizer='adam', loss={
            'yolo_loss': lambda y_true, y_pred: y_pred
        })  # This is a hack to use the custom loss function in the last layer.


    logging = TensorBoard()
    checkpoint = ModelCheckpoint("trained_stage_3_best.h5", monitor='val_loss',
                                 save_weights_only=True, save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')

    model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
              np.zeros(len(image_data)),
              validation_split=validation_split,
              batch_size=32,
              epochs=5,
              callbacks=[logging])
    model.save_weights('trained_stage_1.h5')

    model_body, model = create_model(anchors, class_names, load_pretrained=False, freeze_body=False)

    model.load_weights('trained_stage_1.h5')

    model.compile(
        optimizer='adam', loss={
            'yolo_loss': lambda y_true, y_pred: y_pred
        })  # This is a hack to use the custom loss function in the last layer.


    model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
              np.zeros(len(image_data)),
              validation_split=0.1,
              batch_size=8,
              epochs=30,
              callbacks=[logging])

    model.save_weights('trained_stage_2.h5')

    model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
              np.zeros(len(image_data)),
              validation_split=0.1,
              batch_size=8,
              epochs=30,
              callbacks=[logging, checkpoint, early_stopping])

    model.save_weights('trained_stage_3.h5')
Project: SerpentAI    Author: SerpentAI
def __init__(
        self,
        input_shape=None,
        input_mapping=None,
        replay_memory_size=10000,
        batch_size=32,
        action_space=None,
        max_steps=1000000,
        observe_steps=None,
        initial_epsilon=1.0,
        final_epsilon=0.1,
        gamma=0.99,
        model_file_path=None,
        model_learning_rate=2.5e-4,
        override_epsilon=False
    ):
        self.type = "DQN"
        self.input_shape = input_shape
        self.replay_memory = ReplayMemory(memory_size=replay_memory_size)
        self.batch_size = batch_size
        self.action_space = action_space
        self.action_count = len(self.action_space.combinations)
        self.action_input_mapping = self._generate_action_space_combination_input_mapping(input_mapping)
        self.frame_stack = None
        self.max_steps = max_steps
        self.observe_steps = observe_steps or (0.1 * replay_memory_size)
        self.current_observe_step = 0
        self.current_step = 0
        self.initial_epsilon = initial_epsilon
        self.final_epsilon = final_epsilon
        self.previous_epsilon = initial_epsilon
        self.epsilon_greedy_q_policy = EpsilonGreedyQPolicy(
            initial_epsilon=self.initial_epsilon,
            final_epsilon=self.final_epsilon,
            max_steps=self.max_steps
        )
        self.gamma = gamma
        self.current_action = None
        self.current_action_index = None
        self.current_action_type = None
        self.first_run = True
        self.mode = "OBSERVE"

        self.model_learning_rate = model_learning_rate
        self.model = self._initialize_model()

        if model_file_path is not None:
            self.load_model_weights(model_file_path, override_epsilon)

        self.model_loss = 0

        #self.keras_callbacks = list()
        #self.keras_callbacks.append(TensorBoard(log_dir='/tmp/logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

        self.visual_debugger = VisualDebugger()