Python keras.callbacks module: History() usage examples

The following 12 code examples, extracted from open-source Python projects, illustrate how to use keras.callbacks.History().
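
Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: instantiate History(), pass it to fit() via callbacks, then read per-epoch metrics from its history dict. The toy model and data below are illustrative only; note that fit() also returns an equivalent History object on its own.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import History

# Toy data and model, for illustration only.
x = np.random.rand(100, 8)
y = np.random.randint(0, 2, size=(100, 1))

model = Sequential()
model.add(Dense(16, activation='relu', input_shape=(8,)))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

history = History()
model.fit(x, y, epochs=3, batch_size=10, validation_split=0.2,
          callbacks=[history], verbose=0)

# history.history maps each metric name to a list with one value per epoch.
print(history.history['loss'])
print(history.history['val_loss'])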

Project: geom_rcnn    Author: asbroad    | project source | file source
def train_model(self):
        if self.verbose:
            print('training model ... ')
            start_time = time.time()

        self.checkpointer = ModelCheckpoint(filepath=self.weights_filename, verbose=1, save_best_only=True)
        self.history = History()

        self.model.fit_generator(self.datagen.flow(self.xs_train, self.ys_train, batch_size=32),
                    samples_per_epoch=len(self.xs_train), nb_epoch=self.num_training_epochs, 
                    validation_data=(self.xs_val, self.ys_val),
                    callbacks=[self.checkpointer, self.history])

        if self.verbose:
            end_time = time.time()
            self.print_time(start_time, end_time,'training model')
Project: geom_rcnn    Author: asbroad    | project source | file source
def finetune_model(self):

        if self.verbose:
            print('training model ... ')
            start_time = time.time()

        self.checkpointer = ModelCheckpoint(filepath=self.weights_filename, verbose=1, save_best_only=True)
        self.history = History()

        self.model.fit_generator(self.datagen.flow(self.xs_train, self.ys_train, batch_size=32),
                    samples_per_epoch=len(self.xs_train), nb_epoch=self.num_training_epochs,
                    validation_data=(self.xs_val, self.ys_val),
                    callbacks=[self.checkpointer, self.history])

        if self.verbose:
            end_time = time.time()
            self.print_time(start_time, end_time,'training model')
Project: mycroft    Author: wpm    | project source | file source
def embedding_model_train_predict_evaluate(self, model):
        # Train
        history = model.train(self.texts, self.labels, epochs=2, batch_size=10, validation_fraction=0.1,
                              model_directory=self.model_directory, verbose=0)
        self.assertIsInstance(history, History)
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "model.hd5")))
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "classifier.pk")))
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "description.txt")))
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "history.json")))
        # Predict
        loaded_model = load_embedding_model(self.model_directory)
        self.assertIsInstance(loaded_model, model.__class__)
        n = len(self.texts)
        label_probabilities, predicted_labels = loaded_model.predict(self.texts)
        self.assertEqual((n, 2), label_probabilities.shape)
        self.assertEqual(numpy.dtype("float32"), label_probabilities.dtype)
        self.assertEqual(n, len(predicted_labels))
        self.assertTrue(set(predicted_labels).issubset({"Joyce", "Kafka"}))
        # Evaluate
        scores = loaded_model.evaluate(self.texts, self.labels)
        self.is_loss_and_accuracy(scores)
Project: mpi_learn    Author: duanders    | project source | file source
def init_callbacks(self, for_worker=False):
        """Prepares all keras callbacks to be used in training.
            Automatically attaches a History callback to the end of the callback list.
            If for_worker is True, leaves out callbacks that only make sense 
            with validation enabled."""
        import keras.callbacks as cbks
        remove_for_worker = [cbks.EarlyStopping, cbks.ModelCheckpoint]
        if for_worker:
            for obj in remove_for_worker:
                self.callbacks_list = [ c for c in self.callbacks_list 
                        if not isinstance(c, obj) ]
        self.model.history = cbks.History()
        self.callbacks = cbks.CallbackList( self.callbacks_list + [self.model.history] )

        # it's possible to callback a different model than self
        # (used by Sequential models)
        if hasattr(self.model, 'callback_model') and self.model.callback_model:
            self.callback_model = self.model.callback_model
        else:
            self.callback_model = self.model
        self.callbacks.set_model(self.callback_model)
        self.callback_model.stop_training = False
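
The method above drives callbacks through a CallbackList instead of relying on model.fit. A minimal sketch of how a hand-rolled training loop uses such a list, assuming `model` is a compiled Keras model and train_one_epoch() is a hypothetical stand-in for the actual gradient-update step:

import keras.callbacks as cbks

num_epochs = 3
history = cbks.History()
callbacks = cbks.CallbackList([cbks.BaseLogger(), history])
callbacks.set_model(model)
callbacks.set_params({'epochs': num_epochs, 'verbose': 0})

callbacks.on_train_begin()
for epoch in range(num_epochs):
    callbacks.on_epoch_begin(epoch)
    logs = train_one_epoch(model)          # hypothetical; returns e.g. {'loss': 0.42}
    callbacks.on_epoch_end(epoch, logs)    # History records the logs here
callbacks.on_train_end()

# history.history now holds the per-epoch logs, exactly as after model.fit.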
Project: Kutils    Author: ishank26    | project source | file source
def my_model(X_train, y_train, X_test, y_test):
    ############ model params ################
    line_length = 248  # seq size
    train_char = 58
    hidden_neurons = 512  # hidden neurons
    batch = 64  # batch_size
    no_epochs = 3
    ################### Model ################

    ######### begin model ########
    model = Sequential()
    # layer 1
    model.add(LSTM(hidden_neurons, return_sequences=True,
                   input_shape=(line_length, train_char)))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # layer 2
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # layer 3
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # fc layer
    model.add(TimeDistributed(Dense(train_char, activation='softmax')))
    model.load_weights("weights/model_maha1_noep50_batch64_seq_248.hdf5")
    ########################################################################
    checkpoint = ModelCheckpoint("weights/hypmodel2_maha1_noep{0}_batch{1}_seq_{2}.hdf5".format(
        no_epochs, batch, line_length), monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='min')

    initlr = 0.00114
    adagrad = Adagrad(lr=initlr, epsilon=1e-08,
                      clipvalue={{choice([0, 1, 2, 3, 4, 5, 6, 7])}})
    model.compile(optimizer=adagrad,
                  loss='categorical_crossentropy', metrics=['accuracy'])
    history = History()
    # fit model
    model.fit(X_train, y_train, batch_size=batch, nb_epoch=no_epochs,
              validation_split=0.2, callbacks=[history, checkpoint])

    score, acc = model.evaluate(X_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
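
The double-brace {{choice([...])}} expressions above are hyperas template placeholders rather than plain Python; hyperas rewrites the function into a hyperopt search space before running it. A hedged sketch of how such a function is typically driven, where `data` is a hypothetical function returning X_train, y_train, X_test, y_test:

from hyperas import optim
from hyperopt import Trials, tpe

best_run, best_model = optim.minimize(model=my_model,
                                      data=data,  # hypothetical data function
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())
print(best_run)  # the winning dropout/clipvalue choices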
Project: copper_price_forecast    Author: liyinwei    | project source | file source
def main():
    global_start_time = time.time()
    print('> Loading data... ')
    # mm_scaler, X_train, y_train, X_test, y_test = load_data()
    X_train, y_train, X_test, y_test = load_data()
    print('> Data Loaded. Compiling...')

    model = build_model()
    print(model.summary())

    # keras.callbacks.History records the loss and val_loss of each epoch
    hist = History()
    model.fit(X_train, y_train, batch_size=Conf.BATCH_SIZE, epochs=Conf.EPOCHS, shuffle=True,
              validation_split=0.05, callbacks=[hist])

    # print the per-epoch loss and val_loss
    print(hist.history['loss'])
    print(hist.history['val_loss'])

    # plot loss and val_loss
    plot_loss(hist.history['loss'], hist.history['val_loss'])
    # predicted = predict_by_days(model, X_test, 20)
    predicted = predict_by_day(model, X_test)

    print('Training duration (s) : ', time.time() - global_start_time)

    # predicted = inverse_trans(mm_scaler, predicted)
    # y_test = inverse_trans(mm_scaler, y_test)

    # model evaluation
    model_evaluation(pd.DataFrame(predicted), pd.DataFrame(y_test))

    # model visualization
    model_visualization(y_test, predicted)
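
plot_loss is defined elsewhere in the project; a minimal matplotlib sketch of what such a helper might look like (an assumption, not the project's actual code):

import matplotlib.pyplot as plt

def plot_loss(loss, val_loss):
    """Hypothetical helper: plot training vs. validation loss per epoch."""
    plt.plot(loss, label='loss')
    plt.plot(val_loss, label='val_loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.show()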
Project: copper_price_forecast    Author: liyinwei    | project source | file source
def main():
    global_start_time = time.time()
    print('> Loading data... ')
    # mm_scaler, X_train, y_train, X_test, y_test = load_data()
    X_train, y_train, X_test, y_test = load_data()
    print('> Data Loaded. Compiling...')

    model = build_model()
    print(model.summary())

    # keras.callbacks.History records the loss and val_loss of each epoch
    hist = History()
    model.fit(X_train, y_train, batch_size=Conf.BATCH_SIZE, epochs=Conf.EPOCHS, shuffle=True,
              validation_split=0.05, callbacks=[hist])

    # print the per-epoch loss and val_loss
    print(hist.history['loss'])
    print(hist.history['val_loss'])

    # plot loss and val_loss
    plot_loss(hist.history['loss'], hist.history['val_loss'])
    # predicted = predict_by_days(model, X_test, 20)
    predicted = predict_by_day(model, X_test)

    print('Training duration (s) : ', time.time() - global_start_time)

    # predicted = inverse_trans(mm_scaler, predicted)
    # y_test = inverse_trans(mm_scaler, y_test)

    # model evaluation
    model_evaluation_multi_step(pd.DataFrame(predicted), pd.DataFrame(y_test))

    # model visualization
    model_visulaization_multi_step(y_test, predicted)
Project: mycroft    Author: wpm    | project source | file source
def test_bag_of_words_with_validation_data(self):
        model = BagOfWordsClassifier((self.texts, self.labels, self.label_names))
        history = model.train(self.texts, self.labels, epochs=2, batch_size=10,
                              validation_data=(self.texts, self.labels),
                              model_directory=self.model_directory, verbose=0)
        self.assertIsInstance(history, History)
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "model.hd5")))
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "classifier.pk")))
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "description.txt")))
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "history.json")))
Project: mycroft    Author: wpm    | project source | file source
def embedding_model_train_without_validation(self, model):
        history = model.train(self.texts, self.labels, epochs=2, batch_size=10, model_directory=self.model_directory,
                              verbose=0)
        self.assertIsInstance(history, History)
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "model.hd5")))
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "classifier.pk")))
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "description.txt")))
        self.assertTrue(os.path.exists(os.path.join(self.model_directory, "history.json")))
Project: StockRecommendSystem    Author: doncat99    | project source | file source
def train_data(self, data_feature, window, LabelColumnName):
        history = History()  # needed below by plot_training_curve

        #X_train, y_train, X_test, y_test = self.prepare_train_test_data(data_feature, LabelColumnName)
        X_train, y_train, X_test, y_test = self.prepare_train_data(data_feature, LabelColumnName)
        model = self.build_model(window, X_train, y_train, X_test, y_test)

        model.fit(
            X_train,
            y_train,
            batch_size=self.paras.batch_size,
            epochs=self.paras.epoch,
            # validation_split=self.paras.validation_split,
            # validation_data = (X_known_lately, y_known_lately),
            callbacks=[history],
            # shuffle=True,
            verbose=self.paras.verbose
        )
        # save model
        self.save_training_model(model, window)
        recall_train, tmp = self.predict(model, X_train, y_train)
        # print('train recall is', recall_train)
        # print(' ############## validation on test data ############## ')
        recall_test, tmp = self.predict(model, X_test, y_test)
        # print('test recall is',recall_test)

        # plot training loss/ validation loss
        if self.paras.plot:
            self.plot_training_curve(history)

        return model


    ###################################
    ###                             ###
    ###         Predicting          ###
    ###                             ###
    ###################################
Project: importance-sampling    Author: idiap    | project source | file source
def fit(self, x, y, batch_size=32, epochs=1, verbose=1, callbacks=None,
            validation_split=0.0, validation_data=None, steps_per_epoch=None):
        """Create an `InMemoryDataset` instance with the given data and train
        the model using importance sampling for a given number of epochs.

        Arguments
        ---------
            x: Numpy array of training data or list of numpy arrays
            y: Numpy array of target data
            batch_size: int, number of samples per gradient update
            epochs: int, number of times to iterate over the entire
                    training set
            verbose: {0, >0}, whether to employ the progress bar Keras
                     callback or not
            callbacks: list of Keras callbacks to be called during training
            validation_split: float in [0, 1), percentage of data to use for
                              evaluation
            validation_data: tuple of numpy arrays, Data to evaluate the
                             trained model on without ever training on them
            steps_per_epoch: int or None, number of gradient updates before
                             considering an epoch has passed
        Returns
        -------
            A Keras `History` object that contains information collected during
            training.
        """
        # Create two data tuples from the given x, y, validation_*
        if validation_data is not None:
            x_train, y_train = x, y
            x_test, y_test = validation_data

        elif validation_split > 0:
            assert validation_split < 1, "100% of the data used for testing"
            n = int(round(validation_split * len(x)))
            idxs = np.arange(len(x))
            np.random.shuffle(idxs)
            x_train, y_train = x[idxs[n:]], y[idxs[n:]]
            x_test, y_test = x[idxs[:n]], y[idxs[:n]]

        else:
            x_train, y_train = x, y
            x_test, y_test = np.empty(shape=(0, 1)), np.empty(shape=(0, 1))

        # Make the dataset to train on
        dataset = InMemoryDataset(
            x_train,
            y_train,
            x_test,
            y_test,
            categorical=False  # this means use the targets as is
        )

        return self.fit_dataset(
            dataset=dataset,
            batch_size=batch_size,
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
            verbose=verbose,
            callbacks=callbacks
        )
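
A short usage sketch for the fit method above, assuming the library's ImportanceTraining wrapper around a compiled Keras model (the toy model and data are illustrative only):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from importance_sampling.training import ImportanceTraining  # assumed entry point

# Toy data and model, for illustration only.
x = np.random.rand(256, 10).astype('float32')
y = np.random.randint(0, 2, size=(256, 1)).astype('float32')

model = Sequential()
model.add(Dense(8, activation='relu', input_shape=(10,)))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# The wrapper's fit() is the method shown above; it returns a Keras History.
history = ImportanceTraining(model).fit(x, y, batch_size=32, epochs=5,
                                        validation_split=0.1)
print(history.history['loss'])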
Project: JohaNN    Author: naoyak    | project source | file source
def train_model(midi_files, save_path, model_path=None, step_size=3, phrase_len=20, layer_size=128, batch_size=128, nb_epoch=1):

    melody_corpus, melody_set, notes_indices, indices_notes = build_corpus(midi_files)

    corpus_size = len(melody_set)

    # cut the corpus into semi-redundant sequences of max_len values
    # step_size = 3
    # phrase_len = 20
    phrases = []
    next_notes = []
    for i in range(0, len(melody_corpus) - phrase_len, step_size):
        phrases.append(melody_corpus[i: i + phrase_len])
        next_notes.append(melody_corpus[i + phrase_len])
    print('nb sequences:', len(phrases))

    # transform data into binary matrices
    X = np.zeros((len(phrases), phrase_len, corpus_size), dtype=np.bool)
    y = np.zeros((len(phrases), corpus_size), dtype=np.bool)
    for i, phrase in enumerate(phrases):
        for j, note in enumerate(phrase):
            X[i, j, notes_indices[note]] = 1
        y[i, notes_indices[next_notes[i]]] = 1
    if model_path is None:
        model = Sequential()
        model.add(LSTM(layer_size, return_sequences=True, input_shape=(phrase_len, corpus_size)))
        model.add(Dropout(0.2))
        model.add(LSTM(layer_size, return_sequences=False))
        model.add(Dropout(0.2))
        model.add(Dense(corpus_size))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer=RMSprop())

    else:
        model = load_model(model_path)

    checkpoint = ModelCheckpoint(filepath=save_path,
        verbose=1, save_best_only=False)
    history = History()
    model.fit(X, y, batch_size=batch_size, nb_epoch=nb_epoch, callbacks=[checkpoint, history])

    return model, melody_corpus, melody_set, notes_indices, indices_notes
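
One closing note on the examples above: registering an explicit History() is a belt-and-braces choice. Keras automatically attaches a History callback to every fit()/fit_generator() call and returns it, so the same per-epoch records are available without creating one yourself:

hist = model.fit(X, y, batch_size=batch_size, nb_epoch=nb_epoch, callbacks=[checkpoint])
print(hist.history['loss'])  # same records as an explicitly registered History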