Python tensorflow module: summary() code examples

The following 13 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.summary.
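As a quick orientation, here is a minimal, self-contained sketch of the graph-based tf.summary workflow used throughout the examples below (the tensor names and the "./logs" directory are illustrative, not taken from any of the projects):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(), name="x")
loss = tf.square(x)                      # toy scalar to log
tf.summary.scalar("loss", loss)          # register a scalar summary op
merged = tf.summary.merge_all()          # merge every registered summary op

with tf.Session() as sess:
    writer = tf.summary.FileWriter("./logs", sess.graph)
    for step in range(3):
        summary_str = sess.run(merged, feed_dict={x: float(step)})
        writer.add_summary(summary_str, step)   # append to the event file
    writer.close()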

Project: nengo_dl    Author: nengo    | Project source | File source
def close(self):
        """Close the simulation, freeing resources.

        Notes
        -----
        The simulation cannot be restarted after it is closed.  This is not a
        technical limitation, just a design decision made for all Nengo
        simulators.
        """

        if not self.closed:
            # note: we use getattr in case __init__ crashed before the
            # attribute was created
            if getattr(self, "sess", None) is not None:
                self.sess.close()
            self.sess = None

            if getattr(self, "summary", None) is not None:
                self.summary.close()

            self.closed = True
Project: gym-sandbox    Author: suqi    | Project source | File source
def learn_actor(self, s, x_ma, epoch):   # batch update
        _, police_grads = self.sess.run(self.train_ops, feed_dict={S: s, X_MA: x_ma})
        # the following method for soft-replacing the target params is computationally expensive
        # target_params = (1-tau) * target_params + tau * eval_params
        # self.sess.run([tf.assign(t, (1 - self.tau) * t + self.tau * e) for t, e in zip(self.t_params, self.e_params)])

        summary = tf.Summary()
        # summary.value.add(tag='info/c_gradient{}'.format(self.agent_id), simple_value=float(_c_grad))
        summary.value.add(tag='info/police_grads{}'.format(self.agent_id), simple_value=np.mean([np.mean(_) for _ in police_grads]))
        writer.add_summary(summary, epoch)
        writer.flush()

        # instead of the method above, I use a hard replacement here
        if self.t_replace_counter % self.t_replace_iter == 0:
            self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
        self.t_replace_counter += 1
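The commented-out soft replacement above is expensive mainly because it builds fresh tf.assign ops on every call, growing the graph each step. A minimal sketch of the usual fix, building the update ops once at graph-construction time (tau and the parameter lists here are illustrative stand-ins for self.tau, self.t_params and self.e_params):

import tensorflow as tf

tau = 0.01
e_params = [tf.Variable(1.0, name="eval_w")]    # stands in for self.e_params
t_params = [tf.Variable(0.0, name="target_w")]  # stands in for self.t_params

# build the soft-update ops once, at graph-construction time
soft_replace = [tf.assign(t, (1 - tau) * t + tau * e)
                for t, e in zip(t_params, e_params)]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        sess.run(soft_replace)   # cheap: reuses the same ops every step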
Project: Neural-Architecture-Search-with-RL    Author: dhruvramani    | Project source | File source
def __init__(self, config):
        self.config = config
        self.data = DataSet(self.config)
        self.add_placeholders()
        self.summarizer = tf.summary
        self.net = Network(config)
        self.saver = tf.train.Saver()
        self.epoch_count, self.second_epoch_count = 0, 0
        self.outputs, self.prob = self.net.neural_search()
        self.hyperparams = self.net.gen_hyperparams(self.outputs)
        self.hype_list = [1 for i in range(self.config.hyperparams)] #[7, 7, 24, 5, 5, 36, 3, 3, 48, 64]
        self.reinforce_loss = self.net.REINFORCE(self.prob)
        self.tr_cont_step = self.net.train_controller(self.reinforce_loss, self.val_accuracy)
        self.cNet, self.y_pred = self.init_child(self.hype_list)
        self.cross_loss, self.accuracy, self.tr_model_step = self.grow_child()
        self.init = tf.global_variables_initializer()
        self.local_init = tf.local_variables_initializer()
Project: dynamic-training-bench    Author: galeone    | Project source | File source
def tf_log(summary, collection=SCALAR_SUMMARIES):
    """Add tf.summary object to collection named collection"""
    tf.add_to_collection(collection, summary)
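A sketch of how such a collection might later be consumed; SCALAR_SUMMARIES is defined elsewhere in the project, so the string value below is an assumption:

import tensorflow as tf

SCALAR_SUMMARIES = "scalar_summaries"     # assumed value of the project's constant

loss = tf.constant(0.5)
tf_log(tf.summary.scalar("loss", loss))   # register through the helper above

# merge only the summaries gathered in that collection
merged = tf.summary.merge(tf.get_collection(SCALAR_SUMMARIES))
with tf.Session() as sess:
    summary_str = sess.run(merged)        # serialized Summary protobuf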
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        if self.model.validation_data and self.histogram_freq:
            if epoch % self.histogram_freq == 0:
                # TODO: implement batched calls to sess.run
                # (current call will likely go OOM on GPU)
                if self.model.uses_learning_phase:
                    cut_v_data = len(self.model.inputs)
                    val_data = self.model.validation_data[:cut_v_data] + [0]
                    tensors = self.model.inputs + [K.learning_phase()]
                else:
                    val_data = self.model.validation_data
                    tensors = self.model.inputs
                feed_dict = dict(zip(tensors, val_data))
                result = self.sess.run([self.merged], feed_dict=feed_dict)
                summary_str = result[0]
                self.writer.add_summary(summary_str, epoch)

        for name, value in logs.items():
            if name in ['batch', 'size']:
                continue
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.writer.add_summary(summary, epoch)
        self.writer.flush()
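Note: the values in logs are plain Python/NumPy scalars rather than graph tensors, which is why the callback wraps them in tf.Summary protos by hand instead of evaluating graph-level tf.summary ops.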
Project: keras    Author: NVIDIA    | Project source | File source
def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        if self.model.validation_data and self.histogram_freq:
            if epoch % self.histogram_freq == 0:
                # TODO: implement batched calls to sess.run
                # (current call will likely go OOM on GPU)
                if self.model.uses_learning_phase:
                    cut_v_data = len(self.model.inputs)
                    val_data = self.model.validation_data[:cut_v_data] + [0]
                    tensors = self.model.inputs + [K.learning_phase()]
                else:
                    val_data = self.model.validation_data
                    tensors = self.model.inputs
                feed_dict = dict(zip(tensors, val_data))
                result = self.sess.run([self.merged], feed_dict=feed_dict)
                summary_str = result[0]
                self.writer.add_summary(summary_str, epoch)

        for name, value in logs.items():
            if name in ['batch', 'size']:
                continue
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.writer.add_summary(summary, epoch)
        self.writer.flush()
Project: BDD_Driving_Model    Author: gy20073    | Project source | File source
def visualize(self, net_inputs, net_outputs):
        # takes a batch of training examples of the form [Tensor1, Tensor2, ... Tensor_n]
        # net_inputs: usually images; net_outputs: usually the labels
        # this function visualizes the data that is read in; it returns nothing but writes tf.summary ops

        # visualize the video using multiple images
        # there is no way to visualize a time sequence yet, so isvalid and isstop can't be visualized
        if not FLAGS.no_image_input:
            decoded = net_inputs[0]
            visualize = tf.cast(decoded[0,:,:,:,:], tf.uint8)
            tf.image_summary("video_seq", visualize, max_images=FLAGS.n_sub_frame)
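tf.image_summary and its max_images argument belong to the pre-1.0 API; on TensorFlow 1.x the equivalent call would presumably be:

tf.summary.image("video_seq", visualize, max_outputs=FLAGS.n_sub_frame)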
Project: BDD_Driving_Model    Author: gy20073    | Project source | File source
def visualize(self, net_inputs, net_outputs):
    # takes a batch of training examples of the form [Tensor1, Tensor2, ... Tensor_n]
    # net_inputs: usually images; net_outputs: usually the labels
    # this function visualizes the data that is read in; it returns nothing but writes tf.summary ops
    raise NotImplementedError()
Project: gym-sandbox    Author: suqi    | Project source | File source
def learn_critic(self, x_ma, a_ma, r, x2_ma, s, a, s2, epoch=0):
        # ATTENTION!!!!
        # the key point is that we feed the constant a_ma in place of the critic's tensor self.a_ma
        # this tensor must be replaced here, otherwise the whole network breaks:
        # the critic must take gradients w.r.t. the constant a_ma,
        # while the actor must take gradients through its own network tensor a_ma
        # this is the trick!!
        _c_grad, _c_loss, _a_grads = self.sess.run(
            self.train_ops, feed_dict={X_MA: x_ma, self.a_ma: a_ma,
                                       R: r, X2_MA: x2_ma,
                                       S: s, S2: s2})

        summary = tf.Summary()
        # summary.value.add(tag='info/c_gradient{}'.format(self.agent_id),
        #                   simple_value=float(_c_grad))
        summary.value.add(tag='info/c_loss{}'.format(self.agent_id), simple_value=float(_c_loss))
        writer.add_summary(summary, epoch)
        writer.flush()

        # the following method for soft-replacing the target params is computationally expensive
        # target_params = (1-tau) * target_params + tau * eval_params
        # self.sess.run([tf.assign(t, (1 - self.tau) * t + self.tau * e) for t, e in zip(self.t_params, self.e_params)])

        # instead of the method above, we use a hard replacement here
        if self.t_replace_counter % self.t_replace_iter == 0:
            self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
        self.t_replace_counter += 1


# -------------------  Memory -------------------
Project: Neural-Architecture-Search-with-RL    Author: dhruvramani    | Project source | File source
def add_summaries(self, sess):
        if self.config.load:
            path_ = "../results/tensorboard"
        else:
            path_ = "../bin/results/tensorboard"
        summary_writer_train = tf.summary.FileWriter(path_ + "/train", sess.graph)
        summary_writer_val = tf.summary.FileWriter(path_ + "/val", sess.graph)
        summary_writer_test = tf.summary.FileWriter(path_+ "/test", sess.graph)
        summary_writers = {'train': summary_writer_train, 'val': summary_writer_val, 'test': summary_writer_test}
        return summary_writers
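A sketch of how the returned dict might be used to log a scalar; model stands for an instance of this class, sess for a tf.Session, and the tag and value are illustrative:

summary_writers = model.add_summaries(sess)
summary = tf.Summary()
summary.value.add(tag="loss", simple_value=0.42)   # illustrative values
summary_writers['train'].add_summary(summary, global_step=0)
summary_writers['train'].flush()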
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:

                for weight in layer.weights:
                    if hasattr(tf, 'histogram_summary'):
                        tf.histogram_summary(weight.name, weight)
                    else:
                        tf.summary.histogram(weight.name, weight)

                    if self.write_images:
                        w_img = tf.squeeze(weight)

                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = tf.transpose(w_img)

                        if len(shape) == 1:
                            w_img = tf.expand_dims(w_img, 0)

                        w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)

                        if hasattr(tf, 'image_summary'):
                            tf.image_summary(weight.name, w_img)
                        else:
                            tf.summary.image(weight.name, w_img)

                if hasattr(layer, 'output'):
                    if hasattr(tf, 'histogram_summary'):
                        tf.histogram_summary('{}_out'.format(layer.name),
                                             layer.output)
                    else:
                        tf.summary.histogram('{}_out'.format(layer.name),
                                             layer.output)

        if hasattr(tf, 'merge_all_summaries'):
            self.merged = tf.merge_all_summaries()
        else:
            self.merged = tf.summary.merge_all()

        if self.write_graph:
            if hasattr(tf, 'summary') and hasattr(tf.summary, 'FileWriter'):
                self.writer = tf.summary.FileWriter(self.log_dir,
                                                    self.sess.graph)
            elif parse_version(tf.__version__) >= parse_version('0.8.0'):
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph_def)
        else:
            if hasattr(tf, 'summary') and hasattr(tf.summary, 'FileWriter'):
                self.writer = tf.summary.FileWriter(self.log_dir)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir)
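Note the design choice here: the hasattr checks let the same callback run unchanged on pre-1.0 TensorFlow (tf.histogram_summary, tf.image_summary, tf.train.SummaryWriter) and on 1.x (tf.summary.histogram, tf.summary.image, tf.summary.FileWriter), detecting features at runtime rather than pinning a version.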
Project: keras    Author: NVIDIA    | Project source | File source
def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:

                for weight in layer.weights:
                    if hasattr(tf, 'histogram_summary'):
                        tf.histogram_summary(weight.name, weight)
                    else:
                        tf.summary.histogram(weight.name, weight)

                    if self.write_images:
                        w_img = tf.squeeze(weight)

                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = tf.transpose(w_img)

                        if len(shape) == 1:
                            w_img = tf.expand_dims(w_img, 0)

                        w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)

                        if hasattr(tf, 'image_summary'):
                            tf.image_summary(weight.name, w_img)
                        else:
                            tf.summary.image(weight.name, w_img)

                if hasattr(layer, 'output'):
                    if hasattr(tf, 'histogram_summary'):
                        tf.histogram_summary('{}_out'.format(layer.name),
                                             layer.output)
                    else:
                        tf.summary.histogram('{}_out'.format(layer.name),
                                             layer.output)

        if hasattr(tf, 'merge_all_summaries'):
            self.merged = tf.merge_all_summaries()
        else:
            self.merged = tf.summary.merge_all()

        if self.write_graph:
            if hasattr(tf, 'summary') and hasattr(tf.summary, 'FileWriter'):
                self.writer = tf.summary.FileWriter(self.log_dir,
                                                    self.sess.graph)
            elif parse_version(tf.__version__) >= parse_version('0.8.0'):
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph_def)
        else:
            if hasattr(tf, 'summary') and hasattr(tf.summary, 'FileWriter'):
                self.writer = tf.summary.FileWriter(self.log_dir)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir)
Project: gym-sandbox    Author: suqi    | Project source | File source
def __init__(self, sess, state_dim, action_dim, learning_rate, gamma, t_replace_iter,
                 a_ma, a2_ma, agent_id, agent_num):
        self.agent_id = agent_id
        self.agent_num = agent_num

        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.lr = learning_rate
        self.gamma = gamma
        self.t_replace_iter = t_replace_iter
        self.t_replace_counter = 0

        with tf.variable_scope('Critic{}'.format(self.agent_id)):
            # Input (s, a), output q
            local_a = a_ma[agent_id]
            self.a_ma = tf.concat(a_ma, axis=1)
            self.q = self._build_critic_net(X_MA, self.a_ma, 'eval_net', trainable=True)

            # Input (s_, a_), output q_ for q_target
            a2_ma = tf.concat(a2_ma, axis=1)
            self.q_ = self._build_critic_net(X2_MA, a2_ma, 'target_net', trainable=False)    # target_q is based on a_ from Actor's target_net

            self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic{}/eval_net'.format(agent_id))
            self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic{}/target_net'.format(agent_id))

        with tf.variable_scope('target_q{}'.format(self.agent_id)):
            self.target_q = R + self.gamma * self.q_

        with tf.variable_scope('TD_error{}'.format(self.agent_id)):
            self.loss = tf.reduce_mean(tf.squared_difference(self.target_q, self.q))  # MSE

        with tf.variable_scope('C_train{}'.format(self.agent_id)):
            self.train_ops = []
            self.train_ops.append(tf.train.AdamOptimizer(self.lr).minimize(
                self.loss, var_list=self.e_params))  # the critic train op only updates the critic network, not the actor
            self.train_ops.append(self.loss)  # for tf.summary

        with tf.variable_scope('a_grad{}'.format(self.agent_id)):
            # tensor of gradients of each sample (None, a_dim)
            self.a_grads = tf.gradients(self.q, local_a)[0]  # only take dq/da, discard dq/dw
            self.train_ops.append(self.a_grads)