Python keras.backend module: clear_session() usage examples

The following 21 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.clear_session().
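
clear_session() destroys the current graph and frees the memory associated with it, which matters when models are built repeatedly in one process. As a minimal standalone sketch of the basic pattern (illustrative, not taken from the projects below):

import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense

for trial in range(3):
    # each iteration starts from a fresh graph, so layer names do not
    # accumulate and memory from the previous model is released
    model = Sequential([Dense(10, input_dim=4, activation='relu'),
                        Dense(1, activation='sigmoid')])
    model.compile(optimizer='rmsprop', loss='binary_crossentropy')
    # ... train / evaluate the model here ...
    K.clear_session()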

Project: keras-contrib    Author: farizrahman4u
def keras_test(func):
    """Function wrapper to clean up after TensorFlow tests.

    # Arguments
        func: test function to clean up after.

    # Returns
        A function wrapping the input function.
    """
    @six.wraps(func)
    def wrapper(*args, **kwargs):
        output = func(*args, **kwargs)
        if K.backend() == 'tensorflow':
            K.clear_session()
        return output
    return wrapper
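
A hypothetical use of this decorator (the test body below is illustrative, not from keras-contrib):

@keras_test
def test_dense_output_shape():
    from keras.models import Sequential
    from keras.layers import Dense
    model = Sequential([Dense(2, input_dim=3)])
    assert model.output_shape == (None, 2)
    # on a TensorFlow backend, K.clear_session() runs automatically
    # once the wrapped function returns
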
Project: Two-Stream-Convolutional-Networks    Author: Yorwxue
def train():
    # stack_optical_flow(file_directory, data_update=False)
    with open(pickle_directory + 'class_index_dict.pickle', 'rb') as fr:
        class_index_dict = pickle.load(fr)
    # num_of_classes = int(len(class_index_dict) / 2)
    # seed = [random.random() for i in range(num_of_classes)]

    print('Training temporal model.')
    train_temporal_model(class_index_dict)
    gc.collect()

    # release memory
    # ------------------------
    K.clear_session()
    # sess = tf.Session()
    # K.set_session(sess)
    # ------------------------

    # print('Training spatial model.')
    # train_spatial_model(class_index_dict)
    # gc.collect()

    print('ok.')
Project: python-alp    Author: tboquet
def test_build_predict_func(self, get_model):
        """Test the build of a model"""
        new_session()
        X_tr = np.ones((train_samples, input_dim))
        model = get_model()
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        model_name = model.__class__.__name__

        pred_func = KTB.build_predict_func(model)

        tensors = [X_tr]
        if model_name != 'Model':
            tensors.append(1.)

        res = pred_func(tensors)

        assert len(res[0]) == len(X_tr)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: python-alp    Author: tboquet
def test_fit(self, get_model):
        "Test the training of a serialized model"
        new_session()
        data, data_val = make_data(train_samples, test_samples)

        model = get_model()
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        model_dict = dict()
        model_dict['model_arch'] = to_dict_w_opt(model)

        res = KTB.train(copy.deepcopy(model_dict['model_arch']), [data],
                        [data_val], [])
        res = KTB.fit(NAME, VERSION, model_dict, [data], 'test', [data_val],
                      [])

        assert len(res) == 4

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: python-alp    Author: tboquet
def test_predict(self, get_model):
        """Test to predict using the backend"""
        new_session()
        data, data_val = make_data(train_samples, test_samples)
        model = get_model()
        model.compile(optimizer='sgd', loss='categorical_crossentropy')

        expe = Experiment(model)
        expe.fit([data], [data_val])
        KTB.predict(expe.model_dict, [data['X']], False)
        KTB.predict(expe.model_dict, [data['X']], True)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: provenance    Author: bmabey
def test_reloading_from_disk_has_same_value_id(dbdiskrepo):

    data = mnist_data()
    model = basic_model()
    compiled_model = compile_model(model)
    fitted_model = fit_model(compiled_model, data['X_train'], data['Y_train'])

    K.clear_session()

    reloaded_model = p.load_proxy(fitted_model.artifact.id)

    assert reloaded_model.artifact.value_id == p.hash(reloaded_model.artifact.value)


# this gets to the core of deterministic training with TF (or Theano);
# I'm not sure how best to test it, and I question the value of it,
# so for now I am not going to worry about it.
Project: latplan    Author: guicho271828
def reload_session():
    clear_session()
    load_session()
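
clear_session() also invalidates any session previously registered with set_session(), so code that manages its own session must re-create one afterwards. load_session() is project-specific; a hypothetical counterpart might look like:

import tensorflow as tf
from keras.backend import set_session

def load_session():
    # hypothetical: register a fresh, growth-enabled session with Keras
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))
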
Project: minos    Author: guybedo
def train(self, blueprint, device,
              save_best_model=False, model_filename=None):
        try:
            model = self.model_builder.build(
                blueprint,
                device)
            setup_tf_session(device)
            nb_epoch, callbacks = self._get_stopping_parameters(blueprint)
            if save_best_model:
                callbacks.append(self._get_model_save_callback(
                    model_filename,
                    blueprint.training.metric.metric))
            start = time()
            history = model.fit_generator(
                self.batch_iterator,
                self.batch_iterator.samples_per_epoch,
                nb_epoch,
                callbacks=callbacks,
                validation_data=self.test_batch_iterator,
                nb_val_samples=self.test_batch_iterator.sample_count)
            if save_best_model:
                del model
                model = load_keras_model(model_filename)
            return model, history, (time() - start)
        except Exception as ex:
            logging.debug(ex)
            logging.debug(traceback.format_exc())
        try:
            from keras import backend
            backend.clear_session()
        except Exception as ex:
            logging.debug(ex)
            logging.debug(traceback.format_exc())
        return None, None, 0
Project: devise-keras    Author: priyamtejaswin
def main():
    RUN_TIME = sys.argv[1]


    if RUN_TIME == "TRAIN":
        image_features = Input(shape=(4096,))
        model = build_model(image_features)
        print(model.summary())

        # number of training images 
        _num_train = get_num_train_images()

        # Callbacks 
        # remote_cb = RemoteMonitor(root='http://localhost:9000')
        tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
        epoch_cb    = EpochCheckpoint(folder="./snapshots/")
        valid_cb    = ValidCallBack()

        # fit generator
        steps_per_epoch = math.ceil(_num_train/float(BATCH))
        print "Steps per epoch i.e number of iterations: ",steps_per_epoch

        train_datagen = data_generator(batch_size=INCORRECT_BATCH, image_class_ranges=TRAINING_CLASS_RANGES)
        history = model.fit_generator(
                train_datagen,
                steps_per_epoch=steps_per_epoch,
                epochs=250,
                callbacks=[tensorboard, valid_cb]
            )
        print(history.history.keys())


    elif RUN_TIME == "TEST":
        from keras.models import load_model 
        model = load_model("snapshots/epoch_49.hdf5", custom_objects={"hinge_rank_loss":hinge_rank_loss})

    K.clear_session()
Project: python-alp    Author: tboquet
def new_session():
    if K.backend() == 'tensorflow':  # pragma: no cover
        import tensorflow as tf
        K.clear_session()
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        session = tf.Session(config=config)
        K.set_session(session)
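
The tests in this project call new_session() before building a model so that each test starts from a clean, growth-enabled session. A minimal pairing, assuming keras.backend is imported as K as in the snippets above (illustrative, not from python-alp):

from keras.models import Sequential
from keras.layers import Dense

new_session()                      # fresh session with allow_growth enabled
model = Sequential([Dense(1, input_dim=4)])
model.compile(optimizer='sgd', loss='mse')
# ... exercise the model under test ...
if K.backend() == 'tensorflow':
    K.clear_session()              # tear the graph down afterwards
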
Project: python-alp    Author: tboquet
def test_experiment_fit(self, get_model, get_loss_metric,
                            get_custom_l, get_callback_fix):
        new_session()
        data, data_val = make_data(train_samples, test_samples)
        model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                     get_loss_metric,
                                                     get_custom_l)

        expe = Experiment(model)

        for mod in [None, model]:
            for data_val_loc in [None, data_val]:
                expe.fit([data], [data_val_loc], model=mod, nb_epoch=2,
                         batch_size=batch_size, metrics=metrics,
                         custom_objects=cust_objects, overwrite=True,
                         callbacks=get_callback_fix)

        expe.backend_name = 'another_backend'
        expe.load_model()
        expe.load_model(expe.mod_id, expe.data_id)

        assert expe.data_id is not None
        assert expe.mod_id is not None
        assert expe.params_dump is not None

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: python-alp    Author: tboquet
def test_experiment_fit_gen(self, get_model, get_loss_metric,
                                get_custom_l, get_callback_fix):
        new_session()
        model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                     get_loss_metric,
                                                     get_custom_l)

        model_name = model.__class__.__name__
        _, data_val_use = make_data(train_samples, test_samples)
        expe = Experiment(model)

        for val in [1, data_val_use]:
            gen, data, data_stream = make_gen(batch_size)
            if val == 1:
                val, data_2, data_stream_2 = make_gen(batch_size)
            expe.fit_gen([gen], [val], nb_epoch=2,
                         model=model,
                         metrics=metrics,
                         custom_objects=cust_objects,
                         samples_per_epoch=64,
                         nb_val_samples=128,
                         verbose=2, overwrite=True,
                         callbacks=get_callback_fix)

            close_gens(gen, data, data_stream)
            if val == 1:
                close_gens(val, data_2, data_stream_2)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: python-alp    Author: tboquet
def test_experiment_fit_gen_async(self, get_model, get_loss_metric,
                                      get_custom_l):
        new_session()
        model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                     get_loss_metric,
                                                     get_custom_l)

        _, data_val_use = make_data(train_samples, test_samples)
        expe = Experiment(model)

        expected_value = 2
        for val in [None, 1, data_val_use]:
            gen, data, data_stream = make_gen(batch_size)
            if val == 1:
                val, data_2, data_stream_2 = make_gen(batch_size)
            _, thread = expe.fit_gen_async([gen], [val], nb_epoch=2,
                                           model=model,
                                           metrics=metrics,
                                           custom_objects=cust_objects,
                                           samples_per_epoch=64,
                                           nb_val_samples=128,
                                           verbose=2, overwrite=True)

            thread.join()

            for k in expe.full_res['metrics']:
                if 'iter' not in k:
                    assert len(
                        expe.full_res['metrics'][k]) == expected_value

            close_gens(gen, data, data_stream)
            if val == 1:
                close_gens(val, data_2, data_stream_2)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: python-alp    Author: tboquet
def test_deserialization(self):
        new_session()
        model = sequential()
        model.compile(optimizer='sgd', loss='categorical_crossentropy')
        ser_mod = to_dict_w_opt(model)
        custom_objects = {'test_loss': [1, 2]}
        custom_objects = {k: serialize(custom_objects[k])
                          for k in custom_objects}
        model_from_dict_w_opt(ser_mod, custom_objects=custom_objects)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: Hyperopt-Keras-CNN-CIFAR-100    Author: guillaume-chevalier
def plot_base_and_best_models():
    """Plot a demo model."""
    space_base_demo_to_plot = {
        'lr_rate_mult': 1.0,
        'l2_weight_reg_mult': 1.0,
        'batch_size': 300,
        'optimizer': 'Nadam',
        'coarse_labels_weight': 0.2,
        'conv_dropout_drop_proba': 0.175,
        'fc_dropout_drop_proba': 0.3,
        'use_BN': True,

        'first_conv': 4,
        'residual': 4,
        'conv_hiddn_units_mult': 1.0,
        'nb_conv_pool_layers': 3,
        'conv_pool_res_start_idx': 0.0,
        'pooling_type': 'inception',
        'conv_kernel_size': 3.0,
        'res_conv_kernel_size': 3.0,

        'fc_units_1_mult': 1.0,
        'one_more_fc': 1.0,
        'activation': 'elu'
    }
    space_best_model = {
        "activation": "elu",
        "batch_size": 320.0,
        "coarse_labels_weight": 0.3067103474295116,
        "conv_dropout_drop_proba": 0.25923531175521264,
        "conv_hiddn_units_mult": 1.5958302613876916,
        "conv_kernel_size": 3.0,
        "conv_pool_res_start_idx": 0.0,
        "fc_dropout_drop_proba": 0.4322253354921089,
        "fc_units_1_mult": 1.3083964454436132,
        "first_conv": 3,
        "l2_weight_reg_mult": 0.41206755600055983,
        "lr_rate_mult": 0.6549347353077412,
        "nb_conv_pool_layers": 3,
        "one_more_fc": None,
        "optimizer": "Nadam",
        "pooling_type": "avg",
        "res_conv_kernel_size": 2.0,
        "residual": 3.0,
        "use_BN": True
    }

    model = build_model(space_base_demo_to_plot)
    plot_model(model, to_file='model_demo.png', show_shapes=True)
    print("Saved base model visualization to model_demo.png.")
    K.clear_session()
    del model

    model = build_model(space_best_model)
    plot_model(model, to_file='model_best.png', show_shapes=True)
    print("Saved best model visualization to model_best.png.")
    K.clear_session()
    del model
Project: textfool    Author: bogdan-kulynych
def build_model(max_length=1000,
                nb_filters=64,
                kernel_size=3,
                pool_size=2,
                regularization=0.01,
                weight_constraint=2.,
                dropout_prob=0.4,
                clear_session=True):
    if clear_session:
        K.clear_session()

    model = Sequential()
    model.add(Embedding(
        embeddings.shape[0],
        embeddings.shape[1],
        input_length=max_length,
        trainable=False,
        weights=[embeddings]))

    model.add(Conv1D(nb_filters, kernel_size, activation='relu'))
    model.add(Conv1D(nb_filters, kernel_size, activation='relu'))
    model.add(MaxPooling1D(pool_size))

    model.add(Dropout(dropout_prob))

    model.add(Conv1D(nb_filters * 2, kernel_size, activation='relu'))
    model.add(Conv1D(nb_filters * 2, kernel_size, activation='relu'))
    model.add(MaxPooling1D(pool_size))

    model.add(Dropout(dropout_prob))

    model.add(GlobalAveragePooling1D())
    model.add(Dense(1,
        kernel_regularizer=l2(regularization),
        kernel_constraint=maxnorm(weight_constraint),
        activation='sigmoid'))

    model.compile(
        loss='binary_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy'])

    return model
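
build_model() reads a module-level embeddings matrix. A hypothetical call, assuming a (vocab_size, embedding_dim) NumPy array is defined in the same module so the function can see it:

import numpy as np

embeddings = np.random.rand(10000, 100).astype('float32')  # placeholder matrix
model = build_model(max_length=500, nb_filters=32)  # clears the old session first
model.summary()
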
Project: zhihu_kanshan_cup_2017    Author: coderSkyChen
def predmodel(self, modelname, datatuple, topic_dict_inv):

        import time
        cur_time = time.strftime('%Y-%m-%d-%H-%M', time.localtime(time.time()))
        from collections import Counter

        def tmpfunc(x):
            if len(x) > 5:
                c = Counter(x).most_common(5)
                res = []
                for num, count in c:
                    res.append(topic_dict_inv[num])
            else:
                res = []
                for i in x:
                    res.append(topic_dict_inv[i])

            return res

        predlabels = []
        # titleword_array, dspword_array, ques_ids= datatuple
        titlechar_array, titleword_array, dspchar_array, dspword_array, ques_ids = datatuple

        for i in range(len(modelname)):
            self.model = load_model(modelname[i])
            predlabel = self.model.predict([titlechar_array, titleword_array, dspchar_array, dspword_array],
                                           batch_size=512, verbose=1)
            # predlabel = self.model.predict([titleword_array, titleword_array, dspword_array, dspword_array], batch_size=512, verbose=1)
            # np.savetxt("result/scores/"+cur_time + "scores_4RCNN_gru_dense_nodropout.txt", predlabel, fmt='%s')
            np.save("result/scores/" + cur_time + "4RCNN_lstm512_4part_title_dsp_attention_nofc_06epoch", predlabel)
            # exit()
            predlabel = np.argsort(-predlabel)[:, :5]
            if len(predlabels) == 0:
                predlabels = predlabel
            else:
                predlabels = np.column_stack((predlabels, predlabel))
            print(predlabels.shape)
            K.clear_session()

        with open("result/" + cur_time + ".csv", 'w') as f:
            for i in range(predlabels.shape[0]):
                # f.write(ques_ids[i] + "," + ','.join([topic_dict_inv[k] for k in predlabels[i]]) + '\n')
                f.write(ques_ids[i] + "," + ','.join(tmpfunc(predlabels[i])) + '\n')
Project: HyPRec    Author: mostafa-mahmoud
def _train(self):
        """
        Train the stacked denoising autoencoders.
        """
        if 'fold' in self.hyperparameters:
            current_fold = self.hyperparameters['fold'] + 1
        else:
            current_fold = 0
        term_freq = self.abstracts_preprocessor.get_term_frequency_sparse_matrix().todense()
        self.get_cnn()
        if self._verbose:
            print("CNN is constructed...")
        error = numpy.inf
        iterations = 0
        batchsize = 2048
        for epoch in range(1, 1 + self.n_iter):
            self.document_distribution = self.predict_sdae(term_freq)
            t0 = time.time()
            self.user_vecs = self.als_step(self.user_vecs, self.item_vecs, self.train_data, self._lambda, type='user')
            self.item_vecs = self.als_step(self.item_vecs, self.user_vecs, self.train_data, self._lambda, type='item')
            t1 = time.time()
            iterations += 1
            if self._verbose:
                error = self.evaluator.get_rmse(self.user_vecs.dot(self.item_vecs.T), self.train_data)
                if current_fold == 0:
                    logs = dict(it=iterations, epoch=epoch, loss=error, time=(t1 - t0))
                    print('Iteration:{it:05d} Epoch:{epoch:02d} Loss:{loss:1.4e} Time:{time:.3f}s'.format(**logs))
                else:
                    logs = dict(fold=current_fold, it=iterations, epoch=epoch, loss=error, time=(t1 - t0))
                    print('Fold:{fold:02d} Iteration:{it:05d} Epoch:{epoch:02d} Loss:{loss:1.4e} '
                          'Time:{time:.3f}s'.format(**logs))

            for inp_batch, item_batch in chunks(batchsize, term_freq, self.item_vecs):
                t0 = time.time()
                loss = self.train_sdae(inp_batch, item_batch)
                t1 = time.time()
                iterations += 1
                if self._verbose:
                    if current_fold == 0:
                        msg = ('Iteration:{it:05d} Epoch:{epoch:02d} Loss:{loss:1.3e} Time:{tim:.3f}s')
                        logs = dict(loss=float(loss), epoch=epoch, it=iterations, tim=(t1 - t0))
                        print(msg.format(**logs))
                    else:
                        msg = ('Fold:{fold:02d} Iteration:{it:05d} Epoch:{epoch:02d} Loss:{loss:1.3e} Time:{tim:.3f}s')
                        logs = dict(fold=current_fold, loss=float(loss), epoch=epoch, it=iterations, tim=(t1 - t0))
                        print(msg.format(**logs))
            error = self.evaluator.get_rmse(self.user_vecs.dot(self.item_vecs.T), self.train_data)

        self.document_distribution = self.predict_sdae(term_freq)
        rms = self.evaluate_sdae(term_freq, self.item_vecs)

        if self._verbose:
            print(rms)
        # Garbage collection for keras
        backend.clear_session()
        if self._verbose:
            print("SDAE trained...")
        return rms
Project: DQN    Author: jjakimoto
def __init__(self, config):
        """initialized approximate value function

        config should have the following attributes

        Args:
            device: the device to use computation, e.g. '/gpu:0'
            gamma(float): the decay rate for value at RL
            history_length(int): input_length for each scale at CNN
            n_feature(int): the number of input types
                (e.g. the number of companies used in stock trading)
            trade_stock_idx(int): trading stock index
            gam (float): discount factor
            n_history(int): the number of history steps that will be used as input
            n_smooth, n_down(int): the number of smoothed and down sampling input at CNN
            k_w(int): the size of filter at CNN
            n_hidden(int): the size of fully connected layer
            n_batch(int): the size of mini batch
            n_epochs(int): the number of training epochs at each step
            update_rate (0, 1): parameter for soft update
            learning_rate(float): learning rate for SGD
            memory_length(int): the length of Replay Memory
            n_memory(int): the number of different Replay Memories
            alpha, beta: [0, 1] parameters for Prioritized Replay Memories
            action_scale(float): the scale of the initialized action
        """
        self.device = config.device
        self.save_path = config.save_path
        self.is_load = config.is_load
        self.gamma = config.gamma
        self.history_length = config.history_length
        self.n_stock = config.n_stock
        self.n_smooth = config.n_smooth
        self.n_down = config.n_down
        self.n_batch = config.n_batch
        self.n_epoch = config.n_epoch
        self.update_rate = config.update_rate
        self.alpha = config.alpha
        self.beta = config.beta
        self.lr = config.learning_rate
        self.memory_length = config.memory_length
        self.n_memory = config.n_memory
        self.noise_scale = config.noise_scale
        self.model_config = config.model_config
        # the length of the data as input
        self.n_history = max(self.n_smooth + self.history_length, (self.n_down + 1) * self.history_length)
        print ("building model....")
        # have compatibility with new tensorflow
        tf.python.control_flow_ops = tf
        # avoid creating _LEARNING_PHASE outside the network
        K.clear_session()
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
        K.set_session(self.sess)
        with self.sess.as_default():
            with tf.device(self.device):
                self.build_model()
        print('finished building model!')
Project: DQN    Author: jjakimoto
def __init__(self, config):
        """initialized approximate value function

        config should have the following attributes

        Args:
            device: the device to use computation, e.g. '/gpu:0'
            gamma(float): the decay rate for value at RL
            history_length(int): input_length for each scale at CNN
            n_feature(int): the number of input types
                (e.g. the number of companies used in stock trading)
            n_history(int): the number of history steps that will be used as input
            n_smooth, n_down(int): the number of smoothed and down sampling input at CNN
            k_w(int): the size of filter at CNN
            n_hidden(int): the size of fully connected layer
            n_batch(int): the size of mini batch
            n_epochs(int): the number of training epochs at each step
            update_rate (0, 1): parameter for soft update
            learning_rate(float): learning rate for SGD
            memory_length(int): the length of Replay Memory
            n_memory(int): the number of different Replay Memories
            alpha, beta: [0, 1] parameters for Prioritized Replay Memories
        """
        self.device = config.device
        self.save_path = config.save_path
        self.is_load = config.is_load
        self.gamma = config.gamma
        self.history_length = config.history_length
        self.n_stock = config.n_stock
        self.n_feature = config.n_feature
        self.n_smooth = config.n_smooth
        self.n_down = config.n_down
        self.k_w = config.k_w
        self.n_hidden = config.n_hidden
        self.n_batch = config.n_batch
        self.n_epochs = config.n_epochs
        self.update_rate = config.update_rate
        self.alpha = config.alpha
        self.beta = config.beta
        self.lr = config.learning_rate
        self.memory_length = config.memory_length
        self.n_memory = config.n_memory
        # the length of the data as input
        self.n_history = max(self.n_smooth + self.history_length, (self.n_down + 1) * self.history_length)
        print ("building model....")
        # have compatibility with new tensorflow
        tf.python.control_flow_ops = tf
        # avoid creating _LEARNING_PHASE outside the network
        K.clear_session()
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
        K.set_session(self.sess)
        with self.sess.as_default():
            with tf.device(self.device):
                self.build_model()
        print('finished building model!')
Project: python-alp    Author: tboquet
def test_experiment_fit_async(self, get_model, get_loss_metric,
                                  get_custom_l, get_callback_fix):
        new_session()
        data, data_val = make_data(train_samples, test_samples)
        model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                     get_loss_metric,
                                                     get_custom_l)

        cust_objects['test_list'] = [1, 2]
        expe = Experiment(model)

        expected_value = 2
        for mod in [None, model]:
            for data_val_loc in [None, data_val]:
                _, thread = expe.fit_async([data], [data_val_loc],
                                           model=mod, nb_epoch=2,
                                           batch_size=batch_size,
                                           metrics=metrics,
                                           custom_objects=cust_objects,
                                           overwrite=True,
                                           verbose=2,
                                           callbacks=get_callback_fix)

                thread.join()

                for k in expe.full_res['metrics']:
                    if 'iter' not in k:
                        assert len(
                            expe.full_res['metrics'][k]) == expected_value

                if data_val_loc is not None:
                    for k in expe.full_res['metrics']:
                        if 'val' in k and 'iter' not in k:
                            assert None not in expe.full_res['metrics'][k]
                else:
                    for k in expe.full_res['metrics']:
                        if 'val' in k and 'iter' not in k:
                            assert all([np.isnan(v)
                                        for v in expe.full_res['metrics'][k]])

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)