Python keras.backend module, set_session() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how keras.backend.set_session() is used. Note the vintage: these snippets target TensorFlow 1.x and standalone Keras, relying on APIs such as tf.Session, tf.ConfigProto, and tf.placeholder (moved under tf.compat.v1 in TensorFlow 2.x); several also call tf.initialize_all_variables(), which has long been deprecated in favor of tf.global_variables_initializer().
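
Before the project listings, here is a minimal sketch of the pattern most of these examples share: build a tf.Session with a custom ConfigProto, hand it to Keras with K.set_session(), and then define the model so that it lives in that session's graph. This sketch assumes a TensorFlow 1.x / standalone Keras environment; the small Dense model is a hypothetical placeholder, not code from any project below.

import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

# Configure the session before Keras creates one implicitly.
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True   # allocate GPU memory on demand
sess = tf.Session(config=config)
K.set_session(sess)                      # Keras now runs its ops in sess

# Any model defined from here on is built in sess's graph.
model = Sequential([Dense(1, input_dim=4)])
model.compile(loss='mse', optimizer='adam')

If you swap sessions temporarily, save the previous one with K.get_session() and restore it afterwards, as the spark-deep-learning context managers below do.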

Project: Learning-to-navigate-without-a-map    Author: ToniRV    | project source | file source
def __init__(self, sess, state_size, action_size,
                 batch_size, tau, learning_rate):
        """Init critic network."""
        self.sess = sess
        self.batch_size = batch_size
        self.tau = tau
        self.learning_rate = learning_rate
        self.action_size = action_size

        K.set_session(sess)

        self.model, self.action, self.state = \
            self.create_critic_network(state_size, action_size)
        self.target_model, self.target_action, self.target_state = \
            self.create_critic_network(state_size, action_size)
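        # dQ/da: gradient of the critic output w.r.t. the action input,
        # later fed to the actor for the DDPG policy update.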
        self.action_grads = tf.gradients(self.model.output, self.action)
        self.sess.run(tf.initialize_all_variables())
Project: Multi-Agent_SelfDriving    Author: MLJejuCamp2017    | project source | file source
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE

        K.set_session(sess)

        #Now create the model
        self.model, self.weights, self.state = self.create_actor_network(state_size, action_size)
        self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size)
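        # Placeholder for dQ/da supplied by the critic; propagating its
        # negative through the actor yields a gradient-ascent policy update.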
        self.action_gradient = tf.placeholder(tf.float32,[None, action_size])
        self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)
        grads = zip(self.params_grad, self.weights)
        self.optimize = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(grads)
        self.sess.run(tf.global_variables_initializer())
Project: Multi-Agent_SelfDriving    Author: MLJejuCamp2017    | project source | file source
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE

        K.set_session(sess)

        #Now create the model
        self.model, self.weights, self.state = self.create_actor_network(state_size, action_size)
        self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size)
        self.action_gradient = tf.placeholder(tf.float32,[None, action_size])
        self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)
        grads = zip(self.params_grad, self.weights)
        self.optimize = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(grads)
        self.sess.run(tf.global_variables_initializer())
Project: mnist_LeNet    Author: LuxxxLucy    | project source | file source
def create(cls, **kwargs):
        """
        Create a new model session.

        :param kwargs: optional graph parameters
        :type kwargs: dict
        :return: new model session
        :rtype: ModelSession
        """
        session = tf.Session()
        from keras import backend as K
        K.set_session(session)
        with session.graph.as_default():
            cls.create_graph(**kwargs)
        session.run(tf.global_variables_initializer())
        return cls(session, tf.train.Saver(), kwargs['args'])
Project: ddpg-ros-keras    Author: robosamir    | project source | file source
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE

        K.set_session(sess)

        #Now create the model
        self.model, self.weights, self.state = self.create_actor_network(state_size, action_size)
        self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size)
        self.action_gradient = tf.placeholder(tf.float32,[None, action_size])
        self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)
        grads = zip(self.params_grad, self.weights)
        self.optimize = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(grads)
        self.sess.run(tf.initialize_all_variables())
Project: Two-Stream-Convolutional-Networks    Author: Yorwxue    | project source | file source
def train():
    # stack_optical_flow(file_directory, data_update=False)
    with open(pickle_directory + 'class_index_dict.pickle', 'rb') as fr:
        class_index_dict = pickle.load(fr)
    # num_of_classes = int(len(class_index_dict) / 2)
    # seed = [random.random() for i in range(num_of_classes)]

    print('Training temporal model.')
    train_temporal_model(class_index_dict)
    gc.collect()

    # release memory
    # ------------------------
    K.clear_session()
    # sess = tf.Session()
    # K.set_session(sess)
    # ------------------------

    # print('Training spatial model.')
    # train_spatial_model(class_index_dict)
    # gc.collect()

    print('ok.')
Project: openai_lab    Author: kengz    | project source | file source
def configure_hardware(RAND_SEED):
    '''configure rand seed, GPU'''
    from keras import backend as K
    if K.backend() == 'tensorflow':
        K.tf.set_random_seed(RAND_SEED)
    else:
        K.theano.tensor.shared_randomstreams.RandomStreams(seed=RAND_SEED)

    if K.backend() != 'tensorflow':
        # GPU config for tf only
        return

    process_num = PARALLEL_PROCESS_NUM if args.param_selection else 1
    tf = K.tf
    gpu_options = tf.GPUOptions(
        allow_growth=True,
        per_process_gpu_memory_fraction=1./float(process_num))
    config = tf.ConfigProto(
        gpu_options=gpu_options,
        allow_soft_placement=True)
    sess = tf.Session(config=config)
    K.set_session(sess)
    return sess
Project: rldurak    Author: janEbert    | project source | file source
def __init__(
            self, sess, state_shape, action_shape, load=True, optimizer='adam',
            alpha=0.001, epsilon=1e-8, tau=0.001, neurons_per_layer=[100, 50]):
        """Initialize a critic with the given session, learning rate,
        update factor and neurons in the hidden layers.

        If load is true, load the model instead of creating a new one.
        """
        self.sess = sess
        self.state_shape = state_shape
        self.action_shape = action_shape
        self.optimizer_choice = optimizer.lower()
        self.alpha = alpha
        self.tau = tau
        if len(neurons_per_layer) < 2:
            if not neurons_per_layer:
                self.neurons_per_layer = [100, 50]
            else:
                self.neurons_per_layer = neurons_per_layer + [50]
            print('Neurons per layer for the critic have been adjusted')
        else:
            self.neurons_per_layer = neurons_per_layer
        K.set_session(sess)
        self.model, self.state_input, self.action_input = self.create_model(
                epsilon)
        self.target_model = self.create_model(epsilon)[0]
        self.action_gradients = K.gradients(self.model.output,
                self.action_input)
        self.sess.run(tf.global_variables_initializer())
        if load:
            self.load_weights()
        self.model._make_predict_function()
        self.target_model._make_predict_function()
Project: unblackboxing_webinar    Author: deepsense-ai    | project source | file source
def train(self, train, valid, batch_size, **kwargs):
        X_train, y_train = train
        X_valid, y_valid = valid
        steps = len(X_train) // batch_size  # whole batches per epoch

        tensorboard_callback = TensorBoardCallback(batch_size)
        checkpoint = ModelCheckpoint(filepath=self.model_save_filepath)
        batch_end_callback = BatchEndCallback(self.neptune_organizer)
        epoch_end_callback = EpochEndCallback(self.neptune_organizer,
                                              image_model=self.facenet, test_data=(X_valid, y_valid))

        self.facenet.fit_generator(self.datagen.flow(X_train, y_train, batch_size),
                          steps_per_epoch=steps,
                          validation_data=[X_valid, y_valid],
                          callbacks=[batch_end_callback, epoch_end_callback, tensorboard_callback, 
                                     checkpoint],
                          **kwargs)  
        K.set_session(self.old_session)
Project: ycml    Author: skylander86    | project source | file source
def set_session(self, tf_config=None):
        if tf_config is None:
            tf_config = self.tf_config

        if tf_config is None:
            n_jobs = getattr(self, 'n_jobs', 1)
            log_device_placement = getattr(self, 'log_device_placement', logger.getEffectiveLevel() <= logging.DEBUG)
            tf_config = tf.ConfigProto(inter_op_parallelism_threads=n_jobs, intra_op_parallelism_threads=n_jobs, log_device_placement=log_device_placement, allow_soft_placement=True)
        #end if

        tf_session = tf.Session(config=tf_config)
        K.set_session(tf_session)

        init_op = tf.global_variables_initializer()
        tf_session.run(init_op)
    #end def
Project: ycml    Author: skylander86    | project source | file source
def load_from_tarfile(self, tar_file):
        self.set_session()

        fname = None
        try:
            with NamedTemporaryFile(suffix='.h5', delete=False) as f:
                timer = Timer()
                shutil.copyfileobj(tar_file.extractfile('nn_model.h5'), f)
                fname = f.name
            #end with

            self.nn_model_ = load_model(fname, custom_objects=self.custom_objects)
            logger.debug('Loaded neural network model weights {}.'.format(timer))

        finally:
            if fname:
                os.remove(fname)
        #end try

        return self
    #end def
Project: DDPG-Keras-Torcs    Author: yanpanlau    | project source | file source
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE

        K.set_session(sess)

        #Now create the model
        self.model, self.weights, self.state = self.create_actor_network(state_size, action_size)
        self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size)
        self.action_gradient = tf.placeholder(tf.float32,[None, action_size])
        self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)
        grads = zip(self.params_grad, self.weights)
        self.optimize = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(grads)
        self.sess.run(tf.initialize_all_variables())
Project: gym-sandbox    Author: suqi    | project source | file source
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE

        K.set_session(sess)

        #Now create the model
        self.model, self.weights, self.state = self.create_actor_network(state_size, action_size)
        self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size)
        self.action_gradient = tf.placeholder(tf.float32,[None, action_size])
        self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)
        grads = zip(self.params_grad, self.weights)
        self.optimize = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(grads)
        self.sess.run(tf.initialize_all_variables())
Project: reinforcement-learning    Author: cgnicholls    | project source | file source
def __init__(self, session, action_size, h, w, channels, optimizer=tf.train.AdamOptimizer(1e-4)):
        self.action_size = action_size
        self.optimizer = optimizer
        self.sess = session
        K.set_session(self.sess)

        with tf.variable_scope('online_network'):
            self.action = tf.placeholder('int32', [None], name='action')
            self.reward = tf.placeholder('float32', [None], name='reward')
            model, self.state, self.q_vals = self._build_model(h, w, channels)
            self.weights = model.trainable_weights

        with tf.variable_scope('optimizer'):
            action_one_hot = tf.one_hot(self.action, self.action_size, 1.0, 0.0)
            q_val = tf.reduce_sum(tf.multiply(self.q_vals, action_one_hot), reduction_indices=1)
            self.loss = tf.reduce_mean(tf.square(self.reward - q_val))
            grads = tf.gradients(self.loss, self.weights)
            grads, _ = tf.clip_by_global_norm(grads, 40.0)
            grads_vars = list(zip(grads, self.weights))
            self.train_op = optimizer.apply_gradients(grads_vars)
        with tf.variable_scope('target_network'):
            target_model, self.target_state, self.target_q_vals = self._build_model(h, w, channels)
            target_weights = target_model.trainable_weights
        with tf.variable_scope('target_update'):
            self.target_update = [target_weights[i].assign(self.weights[i]) for i in range(len(target_weights))]
Project: reinforcement-learning    Author: cgnicholls    | project source | file source
def __init__(self, session, action_size, h, w, channels, optimizer=tf.train.AdamOptimizer(1e-4)):
        self.action_size = action_size
        self.optimizer = optimizer
        self.sess = session
        K.set_session(self.sess)

        with tf.variable_scope('online_network'):
            self.action = tf.placeholder('int32', [None], name='action')
            self.reward = tf.placeholder('float32', [None], name='reward')
            model, self.state, self.q_vals = self._build_model(h, w, channels)
            self.weights = model.trainable_weights

        with tf.variable_scope('optimizer'):
            action_one_hot = tf.one_hot(self.action, self.action_size, 1.0, 0.0)
            q_val = tf.reduce_sum(tf.multiply(self.q_vals, action_one_hot), reduction_indices=1)
            self.loss = tf.reduce_mean(tf.square(self.reward - q_val))
            grads = tf.gradients(self.loss, self.weights)
            grads, _ = tf.clip_by_global_norm(grads, 40.0)
            grads_vars = list(zip(grads, self.weights))
            self.train_op = optimizer.apply_gradients(grads_vars)
        with tf.variable_scope('target_network'):
            target_model, self.target_state, self.target_q_vals = self._build_model(h, w, channels)
            target_weights = target_model.trainable_weights
        with tf.variable_scope('target_update'):
            self.target_update = [target_weights[i].assign(self.weights[i]) for i in range(len(target_weights))]
Project: DeepWorks    Author: daigo0927    | project source | file source
def __init__(self):

        self.sess = tf.Session()
        K.set_session(self.sess)

        self._build_graph()
        self._load_data()
Project: latplan    Author: guicho271828    | project source | file source
def load_session():
    K.set_session(
        tf.Session(
            config=tf.ConfigProto(
                allow_soft_placement=True,
                gpu_options =
                tf.GPUOptions(
                    per_process_gpu_memory_fraction=1.0,
                    allow_growth=True,))))
Project: latplan    Author: guicho271828    | project source | file source
def load_session():
    K.set_session(
        tf.Session(
            config=tf.ConfigProto(
                allow_soft_placement=True,
                intra_op_parallelism_threads=1,
                inter_op_parallelism_threads=1,
                device_count = {'CPU': 1},
                gpu_options =
                tf.GPUOptions(
                    per_process_gpu_memory_fraction=1.0,
                    allow_growth=True,))))
Project: Learning-to-navigate-without-a-map    Author: ToniRV    | project source | file source
def __init__(self, sess, state_size, action_size, batch_size,
                 tau, learning_rate):
        """Init actor network.

        Parameters
        ----------
        state_size : tuple
            shape of the state input
        """
        self.sess = sess
        self.batch_size = batch_size
        self.tau = tau
        self.learning_rate = learning_rate
        self.action_size = action_size

        K.set_session(sess)

        # create model
        self.model, self.weights, self.state = \
            self.create_actor_network(state_size, action_size)
        self.target_model, self.target_weights, self.target_state = \
            self.create_actor_network(state_size, action_size)
        self.action_gradient = tf.placeholder(tf.float32, [None, action_size])
        self.params_grad = \
            tf.gradients(self.model.output, self.weights,
                         -self.action_gradient)
        grads = zip(self.params_grad, self.weights)
        self.optimize = \
            tf.train.AdamOptimizer(learning_rate).apply_gradients(grads)
        self.sess.run(tf.initialize_all_variables())
Project: Multi-Agent_SelfDriving    Author: MLJejuCamp2017    | project source | file source
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE
        self.action_size = action_size

        K.set_session(sess)

        #Now create the model
        self.model, self.action, self.state = self.create_critic_network(state_size, action_size)  
        self.target_model, self.target_action, self.target_state = self.create_critic_network(state_size, action_size)  
        self.action_grads = tf.gradients(self.model.output, self.action)  #GRADIENTS for policy update
        self.sess.run(tf.global_variables_initializer())
Project: Multi-Agent_SelfDriving    Author: MLJejuCamp2017    | project source | file source
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE
        self.action_size = action_size

        K.set_session(sess)

        #Now create the model
        self.model, self.action, self.state = self.create_critic_network(state_size, action_size)  
        self.target_model, self.target_action, self.target_state = self.create_critic_network(state_size, action_size)  
        self.action_grads = tf.gradients(self.model.output, self.action)  #GRADIENTS for policy update
        self.sess.run(tf.global_variables_initializer())
Project: Multi-Agent_SelfDriving    Author: MLJejuCamp2017    | project source | file source
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE
        self.action_size = action_size

        K.set_session(sess)

        #Now create the model
        self.model, self.action, self.state = self.create_critic_network(state_size, action_size)  
        self.target_model, self.target_action, self.target_state = self.create_critic_network(state_size, action_size)  
        self.action_grads = tf.gradients(self.model.output, self.action)  #GRADIENTS for policy update
        self.sess.run(tf.global_variables_initializer())
Project: minos    Author: guybedo    | project source | file source
def setup_tf_session(device):
    import tensorflow as tf
    config = tf.ConfigProto()
    if hasattr(config, 'gpu_options'):
        config.allow_soft_placement = True
        config.gpu_options.allow_growth = True
    if is_gpu_device(device):
        config.gpu_options.visible_device_list = str(
            get_device_idx(device))
    elif is_cpu_device(device):
        config.gpu_options.visible_device_list = ''
    from keras import backend
    backend.set_session(tf.Session(config=config))
Project: batchA3C    Author: ssamot    | project source | file source
def main(_):
    g = tf.Graph()
    with g.as_default(), tf.Session() as session:
        K.set_session(session)

        num_actions = get_num_actions()
        graph_ops = build_graph(num_actions)
        saver = tf.train.Saver()

        if FLAGS.testing:
            evaluation(session, graph_ops, saver)
        else:
            train(session, graph_ops, num_actions, saver)
Project: neural-style-keras    Author: robertomest    | project source | file source
def config_gpu(gpu, allow_growth):
    # Choosing gpu
    if gpu == '-1':
        config = tf.ConfigProto(device_count={'GPU': 0})
    else:
        if gpu == 'all':
            gpu = ''
        config = tf.ConfigProto()
        config.gpu_options.visible_device_list = gpu
    if allow_growth:
        config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    K.set_session(session)
Project: spark-deep-learning    Author: databricks    | project source | file source
def __enter__(self):
        self.old_session = K.get_session()
        self.g = self.requested_graph or tf.Graph()
        self.current_session = tf.Session(graph=self.g)
        K.set_session(self.current_session)
        return (self.current_session, self.g)
Project: spark-deep-learning    Author: databricks    | project source | file source
def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the previous session
        K.set_session(self.old_session)
Project: spark-deep-learning    Author: databricks    | project source | file source
def __enter__(self):
        self.sess.__enter__()
        if self.using_keras:
            K.set_session(self.sess)
        return self
Project: spark-deep-learning    Author: databricks    | project source | file source
def __exit__(self, *args):
        if self.using_keras:
            K.set_session(self.keras_prev_sess)
        self.sess.__exit__(*args)
Project: rl-solution    Author: jinfagang    | project source | file source
def main():
    if len(sys.argv) > 1 and sys.argv[1] == 't':
        dummy_show()
    else:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)
        policy_gradient()
Project: rl-solution    Author: jinfagang    | project source | file source
def main():
    if len(sys.argv) > 1 and sys.argv[1] == 't':
        dummy_show()
    else:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)
        policy_gradient()
Project: sbrt2017    Author: igormq    | project source | file source
def setup_gpu(gpu, allow_growth=False, log_device_placement=False):
    # Choosing gpu
    if gpu == '-1':
        config = tf.ConfigProto(device_count={'GPU': 0},
                                log_device_placement=log_device_placement)
    else:
        if gpu == 'all':
            gpu = ''
        config = tf.ConfigProto(log_device_placement=log_device_placement)
        config.gpu_options.visible_device_list = gpu
    if allow_growth:  # dynamic gpu memory allocation
        config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    K.set_session(session)
Project: ddpg-ros-keras    Author: robosamir    | project source | file source
def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE
        self.action_size = action_size

        K.set_session(sess)

        #Now create the model
        self.model, self.action, self.state = self.create_critic_network(state_size, action_size)  
        self.target_model, self.target_action, self.target_state = self.create_critic_network(state_size, action_size)  
        self.action_grads = tf.gradients(self.model.output, self.action)  #GRADIENTS for policy update
        self.sess.run(tf.initialize_all_variables())
Project: tdesc    Author: bkj    | project source | file source
def limit_mem():
    cfg = K.tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    cfg.gpu_options.visible_device_list = "0"
    K.set_session(K.tf.Session(config=cfg))
Project: Keras-FCN    Author: theduynguyen    | project source | file source
def config_tf():
    # reduce TF verbosity
    tf.logging.set_verbosity(tf.logging.FATAL)

    # prevent from allocating all memory
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    sess = tf.Session(config=config)
    K.set_session(sess)
Project: mpi_learn    Author: duanders    | project source | file source
def build_model(self):
        import keras.backend as K
        K.set_session(K.tf.Session(config=K.tf.ConfigProto(
            allow_soft_placement=True, log_device_placement=False,
            gpu_options=K.tf.GPUOptions(
                per_process_gpu_memory_fraction=1./self.comm.Get_size()))))
        with K.tf.device(self.device):
            model = load_model(filename=self.filename, json_str=self.json_str, 
                    custom_objects=self.custom_objects, weights_file=self.weights)
        return model
Project: oslodatascience-rl    Author: Froskekongen    | project source | file source
def __init__(self):
        self.session = tf.Session()
        K.set_session(self.session)
        K.manual_variable_initialization(True)
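        # Stop Keras from initializing variables itself; the single
        # tf.global_variables_initializer() call below handles it.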

        self.model = self._build_model()
        self.graph = self._build_graph(self.model)

        self.session.run(tf.global_variables_initializer())
        self.default_graph = tf.get_default_graph()

        self.default_graph.finalize()  # avoid modifications
Project: rl-ofc    Author: DexGroves    | project source | file source
def main(_):
    g = tf.Graph()
    with g.as_default(), tf.Session() as session:
        K.set_session(session)
        graph_ops = build_graph()
        saver = tf.train.Saver()

        if TRAINING:
            train(session, graph_ops, saver)
        else:
            evaluation(session, graph_ops, saver)
Project: rl-ofc    Author: DexGroves    | project source | file source
def main(_):
    g = tf.Graph()
    with g.as_default(), tf.Session() as session:
        graph_ops = build_tf_graph()
        saver = tf.train.Saver()

        K.set_session(session)

        train(session, graph_ops, saver)
Project: deform-conv    Author: felixlaumon    | project source | file source
def keras_set_tf_debug():
    sess = K.get_session()
    sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
    K.set_session(sess)
Project: PyMLT    Author: didw    | project source | file source
def set_config(self):
        #Tensorflow GPU optimization
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)
Project: PyMLT    Author: didw    | project source | file source
def train_model_tensorflow(self, X_train, Y_train, s_date):
        print("training model %s model.cptk" % s_date)
        #model = BaseModel()
        def baseline_model():
            model = Sequential()
            model.add(LSTM(23, input_shape=(30, 23)))
            model.add(Dense(1, init='he_normal'))
            model.compile(loss='mean_squared_error', optimizer='adam')
            return model

        #Tensorflow GPU optimization
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)

        self.estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=20, batch_size=64, verbose=1)
        self.estimator.fit(X_train, Y_train)
        print("finish training model")
        # saving model
        if not os.path.exists('../model/keras/lstm/%s/' % s_date):
            os.makedirs('../model/keras/lstm/%s/' % s_date)
        model_name = '../model/keras/lstm/%s/model.h5' % s_date
        json_model = self.estimator.model.to_json()
        open(model_name.replace('h5', 'json'), 'w').write(json_model)
        self.estimator.model.save_weights(model_name, overwrite=True)
Project: PyMLT    Author: didw    | project source | file source
def set_config(self):
        #Tensorflow GPU optimization
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)
Project: PyMLT    Author: didw    | project source | file source
def set_config(self):
        #Tensorflow GPU optimization
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)
Project: PyMLT    Author: didw    | project source | file source
def train_model_tensorflow(self, X_train, Y_train, s_date):
        print("training model %s model.cptk" % s_date)
        #model = BaseModel()
        def baseline_model():
            model = Sequential()
            model.add(Dense(200, input_dim=690, init='he_normal', activation='relu'))
            model.add(Dense(1, init='he_normal'))
            model.compile(loss='mean_squared_error', optimizer='adam')
            return model

        #Tensorflow GPU optimization
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)

        self.estimator = KerasRegressor(build_fn=baseline_model, nb_epoch=50, batch_size=64, verbose=0)
        self.estimator.fit(X_train, Y_train)
        print("finish training model")
        # saving model
        if not os.path.exists('../model/keras/regression/%s/' % s_date):
            os.makedirs('../model/keras/regression/%s/' % s_date)
        model_name = '../model/keras/regression/%s/model.h5' % s_date
        json_model = self.estimator.model.to_json()
        open(model_name.replace('h5', 'json'), 'w').write(json_model)
        self.estimator.model.save_weights(model_name, overwrite=True)
Project: rldurak    Author: janEbert    | project source | file source
def __init__(
            self, sess, state_shape, action_shape, load=True, optimizer='adam',
            alpha=0.001, epsilon=1e-8, tau=0.001, neurons_per_layer=[100, 50]):
        """Construct an actor with the given session, learning rate,
        update factor and neurons in the hidden layers.

        If load is true, load the model instead of creating a new one.
        """
        self.sess = sess
        self.state_shape = state_shape
        self.action_shape = action_shape
        self.optimizer_choice = optimizer.lower()
        self.alpha = alpha
        self.tau = tau
        if not neurons_per_layer:
            self.neurons_per_layer = [100]
            print('Neurons per layer for the actor have been adjusted')
        else:
            self.neurons_per_layer = neurons_per_layer
        K.set_session(sess)
        self.model, self.inputs, weights = self.create_model()
        self.target_model = self.create_model()[0]
        self.action_gradients = tf.placeholder(tf.float32,
                [None, self.action_shape])
        parameter_gradients = tf.gradients(self.model.output, weights,
                -self.action_gradients)
        gradients = zip(parameter_gradients, weights)
        assert self.optimizer_choice in ['adam', 'rmsprop']
        if self.optimizer_choice == 'adam':
            self.optimizer = tf.train.AdamOptimizer(
                    self.alpha, epsilon=epsilon).apply_gradients(gradients)
        else:
            self.optimizer = tf.train.RMSPropOptimizer(
                    self.alpha, epsilon=epsilon).apply_gradients(gradients)
        self.sess.run(tf.global_variables_initializer())
        if load:
            self.load_weights()
        self.model._make_predict_function()
        self.target_model._make_predict_function()
Project: lang2program    Author: kelvinguu    | project source | file source
def clean_session():
    """Create a new Graph, bind the graph to a new Session, and make that session the default."""
    graph = tf.Graph()  # create a fresh graph
    with tf.Session(graph=graph) as sess:
        K.set_session(sess)  # bind Keras
        yield sess
Project: lang2program    Author: kelvinguu    | project source | file source
def clean_session():
    """Create a new Graph, bind the graph to a new Session, and make that session the default."""
    graph = tf.Graph()  # create a fresh graph
    with tf.Session(graph=graph) as sess:
        K.set_session(sess)  # bind Keras
        yield sess
Project: async-deeprl    Author: dbobrenko    | project source | file source
def __init__(self, session, action_size, h, w, channels, opt=tf.train.AdamOptimizer(1e-4)):
        """Creates Q-Learning agent
        :param session: tensorflow session
        :param action_size: (int) length of action space
        :param h: (int) input image height
        :param w: (int) input image width
        :param channels: (int) number of image channels
        :param opt: tensorflow optimizer (by default: Adam optimizer)"""
        self.action_size = action_size
        self.opt = opt
        self.global_step = tf.Variable(0, name='frame', trainable=False)
        self.frame_inc_op = self.global_step.assign_add(1, use_locking=True)
        K.set_session(session)
        self.sess = session
        with tf.variable_scope('network'):
            self.action = tf.placeholder('int32', [None], name='action')
            self.reward = tf.placeholder('float32', [None], name='reward')
            model, self.state, self.q_values = self._build_model(h, w, channels)
            self.weights = model.trainable_weights
        with tf.variable_scope('optimizer'):
            # Zero all actions, except one that was performed
            action_onehot = tf.one_hot(self.action, self.action_size, 1.0, 0.0)
            # Predict expected future reward for performed action
            q_value = tf.reduce_sum(tf.multiply(self.q_values, action_onehot), reduction_indices=1)
            # Define squared mean loss function: (y - y_)^2
            self.loss = tf.reduce_mean(tf.square(self.reward - q_value))
            # Compute gradients w.r.t. weights
            grads = tf.gradients(self.loss, self.weights)
            # Apply gradient norm clipping
            grads, _ = tf.clip_by_global_norm(grads, 40.)
            grads_vars = list(zip(grads, self.weights))
            self.train_op = opt.apply_gradients(grads_vars)
        with tf.variable_scope('target_network'):
            target_m, self.target_state, self.target_q_values = self._build_model(h, w, channels)
            target_w = target_m.trainable_weights
        with tf.variable_scope('target_update'):
            self.target_update = [target_w[i].assign(self.weights[i])
                                  for i in range(len(target_w))]
Project: language-model    Author: beamandrew    | project source | file source
def __init__(self,params):
        config = tf.ConfigProto(allow_soft_placement=True)
        self.sess = tf.Session(config=config)
        K.set_session(self.sess)
        # Pull out all of the parameters
        self.batch_size = params['batch_size']
        self.seq_len = params['seq_len']
        self.vocab_size = params['vocab_size']
        self.embed_size = params['embed_size']
        self.hidden_dim = params['hidden_dim']
        self.num_layers = params['num_layers']
        with tf.device('/gpu:0'):
            # Set up the input placeholder
            self.input_seq = tf.placeholder(tf.float32, shape=[None, self.seq_len])
            # Build the RNN
            self.rnn = Embedding(self.vocab_size + 1, self.embed_size, input_length=self.seq_len)(self.input_seq)
        with tf.device('/gpu:1'):
            for l in range(self.num_layers):
                self.rnn = LSTM(output_dim=self.hidden_dim, return_sequences=True, name='rnn_1')(self.rnn)
            rnn_output = tf.unpack(self.rnn, axis=1)
            self.w_proj = tf.Variable(tf.zeros([self.vocab_size, self.hidden_dim]))
            self.b_proj = tf.Variable(tf.zeros([self.vocab_size]))
            self.output_seq = tf.placeholder(tf.int64, shape=([None, self.seq_len]))
            losses = []
            outputs = []
            for t in range(self.seq_len):
                rnn_t = rnn_output[t]
                y_t = tf.reshape(self.output_seq[:, t],[-1,1])
                step_loss = tf.nn.sampled_softmax_loss(weights=self.w_proj, biases=self.b_proj, inputs=rnn_t,
                                                       labels=y_t, num_sampled=512, num_classes=self.vocab_size)
                losses.append(step_loss)
                outputs.append(tf.matmul(rnn_t, tf.transpose(self.w_proj)) + self.b_proj)
            self.step_losses = losses
            self.output = outputs
            self.loss = tf.reduce_mean(self.step_losses)
            self.softmax = tf.nn.softmax(self.output)