Python tensorflow module: merge_summary() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.merge_summary().
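
Before the project examples, here is a minimal self-contained sketch of the API, assuming the TensorFlow 0.x interface that all of these snippets target. (In TensorFlow 1.0+ these symbols were renamed: tf.scalar_summary -> tf.summary.scalar, tf.histogram_summary -> tf.summary.histogram, tf.image_summary -> tf.summary.image, tf.merge_summary -> tf.summary.merge, tf.merge_all_summaries -> tf.summary.merge_all, and tf.train.SummaryWriter -> tf.summary.FileWriter.)

import tensorflow as tf

loss = tf.placeholder(tf.float32, [], name="loss")
weights = tf.placeholder(tf.float32, [None], name="weights")

# Each *_summary op returns a serialized Summary protobuf as a string tensor.
loss_sum = tf.scalar_summary("loss", loss)
weights_sum = tf.histogram_summary("weights", weights)

# merge_summary combines an explicit list of summary ops into a single op,
# so one sess.run() yields one string to hand to the SummaryWriter.
summary_op = tf.merge_summary([loss_sum, weights_sum])

with tf.Session() as sess:
    writer = tf.train.SummaryWriter("/tmp/merge_summary_demo", sess.graph)
    summary_str = sess.run(summary_op,
                           feed_dict={loss: 0.5, weights: [0.1, 0.2, 0.3]})
    writer.add_summary(summary_str, global_step=0)
    writer.close()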

Project: easy-tensorflow    Author: khanhptnk    | project source | file source
def run(self):
    """Run evaluation."""
    # Create the logging directory if it does not exist.
    if not os.path.isdir(self._eval_log_dir):
      os.makedirs(self._eval_log_dir)

    # Compute the loss function and other evaluation metrics.
    self._initialize()

    # Visualize input images in TensorBoard.
    self._summary_ops.append(tf.image_summary("Eval_Image", self._observations, max_images=5))

    # Use `slim.evaluation.evaluation_loop` to evaluate the model periodically.
    slim.evaluation.evaluation_loop(
        master='',
        checkpoint_dir=self._train_log_dir,
        logdir=self._eval_log_dir,
        num_evals=self._config.num_batches,
        eval_op=self._metrics_to_updates.values(),
        summary_op=tf.merge_summary(self._summary_ops),
        eval_interval_secs=self._config.eval_interval_secs)
Project: easy-tensorflow    Author: khanhptnk    | project source | file source
def run(self):
    """Run training."""
    # Create the logging directory if it does not exist.
    if not os.path.isdir(self._train_log_dir):
      os.makedirs(self._train_log_dir)

    # Load data and compute the loss function.
    self._initialize()

    # Visualize input images in TensorBoard.
    self._summary_ops.append(tf.image_summary("Image_Train", self._observations, max_images=5))

    # Initialize optimizer.
    optimizer = tf.train.AdadeltaOptimizer(self._config.learning_rate)
    train_op = slim.learning.create_train_op(self._loss, optimizer)

    # Use `slim.learning.train` to manage training.
    slim.learning.train(train_op=train_op,
                        logdir=self._train_log_dir,
                        graph=self._graph,
                        number_of_steps=self._config.train_steps,
                        summary_op=tf.merge_summary(self._summary_ops),
                        save_summaries_secs=self._config.save_summaries_secs,
                        save_interval_secs=self._config.save_interval_secs)
Project: how_to_convert_text_to_images    Author: llSourcell    | project source | file source
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_g'):
                all_sum['hr_g'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_d'):
                all_sum['hr_d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
        self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
        self.hist_sum = tf.merge_summary(all_sum['hist'])
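
Why several merged ops instead of one merge_all_summaries()? In alternating GAN training, each phase runs and logs only its own group. A minimal sketch of that consumption pattern, assuming the same TF 0.x API (the placeholder losses and log directory below are illustrative stand-ins, not this project's real ops):

import tensorflow as tf

g_loss = tf.placeholder(tf.float32, [], name="g_loss")
d_loss = tf.placeholder(tf.float32, [], name="d_loss")
g_sum = tf.merge_summary([tf.scalar_summary("g_loss", g_loss)])
d_sum = tf.merge_summary([tf.scalar_summary("d_loss", d_loss)])

with tf.Session() as sess:
    writer = tf.train.SummaryWriter("/tmp/gan_summary_demo", sess.graph)
    for step in range(100):
        # Discriminator phase: run and log only the 'd' group.
        writer.add_summary(sess.run(d_sum, {d_loss: 0.7}), step)
        # Generator phase: run and log only the 'g' group.
        writer.add_summary(sess.run(g_sum, {g_loss: 1.3}), step)
    writer.close()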
Project: how_to_convert_text_to_images    Author: llSourcell    | project source | file source
def visualization(self, n):
        fake_sum_train, superimage_train =\
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test =\
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])

        hr_fake_sum_train, hr_superimage_train =\
            self.visualize_one_superimage(self.hr_fake_images[:n * n],
                                          self.hr_images[:n * n, :, :, :],
                                          n, "hr_train")
        hr_fake_sum_test, hr_superimage_test =\
            self.visualize_one_superimage(self.hr_fake_images[n * n:2 * n * n],
                                          self.hr_images[n * n:2 * n * n],
                                          n, "hr_test")
        self.hr_superimages =\
            tf.concat(0, [hr_superimage_train, hr_superimage_test])
        self.hr_image_summary =\
            tf.merge_summary([hr_fake_sum_train, hr_fake_sum_test])
Project: tflearn    Author: tflearn    | project source | file source
def summarize_variables(train_vars=None, summary_collection="tflearn_summ"):
    """ summarize_variables.

    Arguments:
        train_vars: list of `Variable`. The variable weights to monitor.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    if not train_vars: train_vars = tf.trainable_variables()
    summaries.add_trainable_vars_summary(train_vars, "", "", summary_collection)
    return merge_summary(tf.get_collection(summary_collection))
Project: tflearn    Author: tflearn    | project source | file source
def summarize(value, type, name, summary_collection="tflearn_summ"):
    """ summarize.

    A custom summarization op.

    Arguments:
        value: `Tensor`. The tensor value to monitor.
        type: `str` among 'histogram', 'scalar'. The data monitoring type.
        name: `str`. A name for this summary.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    if tf012:
        name = name.replace(':', '_')
    summaries.get_summary(type, name, value, summary_collection)
    return merge_summary(tf.get_collection(summary_collection))
Project: tflearn    Author: tflearn    | project source | file source
def create_summaries(self, verbose=2):
        """ Create summaries with `verbose` level """

        summ_collection = self.name + "_training_summaries"

        if verbose in [3]:
            # Summarize activations
            activations = tf.get_collection(tf.GraphKeys.ACTIVATIONS)
            summarize_activations(activations, summ_collection)
        if verbose in [2, 3]:
            # Summarize variable weights
            summarize_variables(self.train_vars, summ_collection)
        if verbose in [1, 2, 3]:
            # Summarize gradients
            summarize_gradients(self.grad, summ_collection)

        self.summ_op = merge_summary(tf.get_collection(summ_collection))
Project: StackGAN    Author: hanzhanggit    | project source | file source
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_g'):
                all_sum['hr_g'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_d'):
                all_sum['hr_d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
        self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
        self.hist_sum = tf.merge_summary(all_sum['hist'])
Project: StackGAN    Author: hanzhanggit    | project source | file source
def visualization(self, n):
        fake_sum_train, superimage_train =\
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test =\
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])

        hr_fake_sum_train, hr_superimage_train =\
            self.visualize_one_superimage(self.hr_fake_images[:n * n],
                                          self.hr_images[:n * n, :, :, :],
                                          n, "hr_train")
        hr_fake_sum_test, hr_superimage_test =\
            self.visualize_one_superimage(self.hr_fake_images[n * n:2 * n * n],
                                          self.hr_images[n * n:2 * n * n],
                                          n, "hr_test")
        self.hr_superimages =\
            tf.concat(0, [hr_superimage_train, hr_superimage_test])
        self.hr_image_summary =\
            tf.merge_summary([hr_fake_sum_train, hr_fake_sum_test])
Project: deepSpeech    Author: fordDeepDSP    | project source | file source
def add_summaries(summaries, learning_rate, grads):
    """ Add summary ops"""

    # Track quantities for TensorBoard display
    summaries.append(tf.scalar_summary('learning_rate', learning_rate))
    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            summaries.append(
                tf.histogram_summary(var.op.name +
                                     '/gradients', grad))
    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        summaries.append(tf.histogram_summary(var.op.name, var))

    # Build the summary operation from the last tower summaries.
    summary_op = tf.merge_summary(summaries)
    return summary_op
Project: lstm-rcnn-pedestrian-detection    Author: buffer51    | project source | file source
def create_train_summaries(learning_rate, clas_loss, reg_loss, rpn_loss, clas_accuracy, clas_positive_percentage, clas_positive_accuracy, VGG16D_activations, clas_activations):
    with tf.name_scope('train'):
        learning_rate_summary = tf.scalar_summary('learning_rate', learning_rate)

        loss_clas_summary = tf.scalar_summary('loss/clas', clas_loss)
        loss_reg_summary = tf.scalar_summary('loss/reg', reg_loss)
        loss_rpn_summary = tf.scalar_summary('loss/rpn', rpn_loss)

        stat_accuracy_summary = tf.scalar_summary('stat/accuracy', clas_accuracy)
        stat_positive_percentage_summary = tf.scalar_summary('stat/positive_percentage', clas_positive_percentage)
        stat_positive_accuracy_summary = tf.scalar_summary('stat/positive_accuracy', clas_positive_accuracy)

        VGG16D_histogram = tf.histogram_summary('activations/VGG16D', VGG16D_activations)
        clas_histogram = tf.histogram_summary('activations/clas', clas_activations)

        return tf.merge_summary([learning_rate_summary, loss_clas_summary, loss_reg_summary, loss_rpn_summary, stat_accuracy_summary, stat_positive_percentage_summary, stat_positive_accuracy_summary, VGG16D_histogram, clas_histogram])
Project: needle    Author: roosephu    | project source | file source
def build_train(self):

        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=tf.get_variable_scope().name)
        regularization = tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(0.01),
            self.variables,
        )

        self.op_grad_actions = tf.placeholder(tf.float32, [None, self.action_dim])
        self.op_loss = tf.reduce_sum(-self.op_grad_actions * self.op_actions) # + regularization
        self.op_summary = tf.merge_summary([
            tf.scalar_summary("actor loss", self.op_loss),
            tf.histogram_summary("actor", self.op_actions),
        ])

        self.op_train = tf.train.AdamOptimizer(self.learning_rate).minimize(self.op_loss)

    # def get_op_train(self):
    #     self.op_grads = tf.gradients(self.op_actions, self.variables, -self.op_grad_actions)
    #     self.op_grads2 = tf.gradients(self.op_loss, self.variables)
    #     return tf.train.AdamOptimizer(1e-4).apply_gradients(zip(self.op_grads2, self.variables))
Project: needle    Author: roosephu    | project source | file source
def build_train(self):
        self.op_rewards = tf.placeholder(tf.float32, [None])

        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=tf.get_variable_scope().name)
        regularization = tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(0.01),
            self.variables,
        )
        self.op_loss = tf.reduce_mean((self.op_rewards - self.op_critic)**2) + regularization
        self.op_summary = tf.merge_summary([
            tf.scalar_summary("critic loss", self.op_loss),
            tf.histogram_summary("critic", self.op_critic),
        ])

        self.op_grad_actions = tf.gradients(self.op_critic, self.op_actions)[0]
        self.op_train = tf.train.AdamOptimizer(self.learning_rate).minimize(self.op_loss)
Project: how_to_convert_text_to_images    Author: llSourcell    | project source | file source
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hist_sum = tf.merge_summary(all_sum['hist'])
Project: how_to_convert_text_to_images    Author: llSourcell    | project source | file source
def visualization(self, n):
        fake_sum_train, superimage_train = \
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test = \
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])
Project: tflearn    Author: tflearn    | project source | file source
def summarize_activations(activations, summary_collection="tflearn_summ"):
    """ summarize_activations.

    Arguments:
        activations: list of `Tensor`. The activations to monitor.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    summaries.add_activations_summary(activations, "", "", summary_collection)
    return merge_summary(tf.get_collection(summary_collection))
Project: tflearn    Author: tflearn    | project source | file source
def summarize_gradients(grads, summary_collection="tflearn_summ"):
    """ summarize_gradients.

    Arguments:
        grads: list of `Tensor`. The gradients to monitor.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    summaries.add_gradients_summary(grads, "", "", summary_collection)
    return merge_summary(tf.get_collection(summary_collection))
Project: StackGAN    Author: hanzhanggit    | project source | file source
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hist_sum = tf.merge_summary(all_sum['hist'])
Project: StackGAN    Author: hanzhanggit    | project source | file source
def visualization(self, n):
        fake_sum_train, superimage_train = \
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test = \
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])
Project: bi-att-flow    Author: allenai    | project source | file source
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        # The scoped merge below replaces the merge-all op, keeping only the
        # summaries created under this model's variable scope.
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
Project: Chinese-QA    Author: distantJing    | project source | file source
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
Project: lsdc    Author: febert    | project source | file source
def __init__(self, conf, images=None, scores=None, goal_pos=None, desig_pos=None):
        batchsize = int(conf['batch_size'])
        if goal_pos is None:
            self.goal_pos = goal_pos= tf.placeholder(tf.float32, name='goalpos', shape=(batchsize, 2))
        if desig_pos is None:
            self.desig_pos = desig_pos =  tf.placeholder(tf.float32, name='desig_pos_pl', shape=(batchsize, 2))
        if scores is None:
            self.scores = scores = tf.placeholder(tf.float32, name='score_pl', shape=(batchsize, 1))
        if images is None:
            self.images = images = tf.placeholder(tf.float32, name='images_pl', shape=(batchsize, 1, 64,64,3))

        self.prefix = prefix = tf.placeholder(tf.string, [])

        from value_model import construct_model

        summaries = []
        inf_scores = construct_model(conf, images, goal_pos, desig_pos)
        self.inf_scores = inf_scores
        self.loss = loss = mean_squared_error(inf_scores, scores)

        summaries.append(tf.scalar_summary(prefix + '_loss', loss))

        self.lr = tf.placeholder_with_default(conf['learning_rate'], ())

        self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
        self.summ_op = tf.merge_summary(summaries)
Project: adversarial-squad    Author: robinjia    | project source | file source
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
Project: lstm-rcnn-pedestrian-detection    Author: buffer51    | project source | file source
def create_test_summaries(test_placeholders):
    with tf.name_scope('test'):
        accuracy_summary = tf.scalar_summary('accuracy', test_placeholders[0])

        positive_recall_summary = tf.scalar_summary('recall/positive', test_placeholders[1])
        negative_recall_summary = tf.scalar_summary('recall/negative', test_placeholders[2])
        recall_summary = tf.scalar_summary('recall/global', test_placeholders[3])

        positive_precision_summary = tf.scalar_summary('precision/positive', test_placeholders[4])
        negative_precision_summary = tf.scalar_summary('precision/negative', test_placeholders[5])
        precision_summary = tf.scalar_summary('precision/global', test_placeholders[6])

        F_score_summary = tf.scalar_summary('F-score', test_placeholders[7])

        return tf.merge_summary([accuracy_summary, positive_recall_summary, negative_recall_summary, recall_summary, positive_precision_summary, negative_precision_summary,precision_summary, F_score_summary])
Project: AS_6Dof_Arm    Author: yao62995    | project source | file source
def grad_histograms(grads_and_vars):
        s = []
        for grad, var in grads_and_vars:
            s.append(tf.histogram_summary(var.op.name + '', var))
            s.append(tf.histogram_summary(var.op.name + '/gradients', grad))
        return tf.merge_summary(s)
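
One caveat: optimizer.compute_gradients() yields (None, var) pairs for variables that do not influence the loss, and tf.histogram_summary cannot accept None; the deepSpeech add_summaries example above guards against exactly that. A guarded variant of this helper, as a sketch (the _safe name is mine, not from the project):

import tensorflow as tf

def grad_histograms_safe(grads_and_vars):
    s = []
    for grad, var in grads_and_vars:
        s.append(tf.histogram_summary(var.op.name, var))
        # Skip variables whose gradient is None (not on the loss's path).
        if grad is not None:
            s.append(tf.histogram_summary(var.op.name + '/gradients', grad))
    return tf.merge_summary(s)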
Project: supervised-embedding-model    Author: sld    | project source | file source
def _init_summaries(self):
        self.accuracy = tf.placeholder_with_default(0.0, shape=(), name='Accuracy')
        self.accuracy_summary = tf.scalar_summary('Accuracy summary', self.accuracy)

        self.f_pos_summary = tf.histogram_summary('f_pos', self.f_pos)
        self.f_neg_summary = tf.histogram_summary('f_neg', self.f_neg)

        self.loss_summary = tf.scalar_summary('Mini-batch loss', self.loss)
        self.summary_op = tf.merge_summary(
            [
                self.f_pos_summary,
                self.f_neg_summary,
                self.loss_summary
            ]
        )
Project: npfl114    Author: ufal    | project source | file source
def construct(self, hidden_layer_size):
        with self.session.graph.as_default():
            with tf.name_scope("inputs"):
                self.images = tf.placeholder(tf.float32, [None, self.WIDTH, self.HEIGHT, 1], name="images")
                self.labels = tf.placeholder(tf.int64, [None], name="labels")

            flattened_images = tf_layers.flatten(self.images, scope="preprocessing")
            hidden_layer = tf_layers.fully_connected(flattened_images, num_outputs=hidden_layer_size, activation_fn=tf.nn.relu, scope="hidden_layer")
            output_layer = tf_layers.fully_connected(hidden_layer, num_outputs=self.LABELS, activation_fn=None, scope="output_layer")
            self.predictions = tf.argmax(output_layer, 1)

            loss = tf_losses.sparse_softmax_cross_entropy(output_layer, self.labels, scope="loss")
            self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name="global_step")
            self.training = tf.train.AdamOptimizer().minimize(loss, global_step=self.global_step)
            self.accuracy = tf_metrics.accuracy(self.predictions, self.labels)

            # Summaries
            self.summaries = {"training": tf.merge_summary([tf.scalar_summary("train/loss", loss),
                                                            tf.scalar_summary("train/accuracy", self.accuracy)])}
            for dataset in ["dev", "test"]:
                self.summaries[dataset] = tf.scalar_summary(dataset+"/accuracy", self.accuracy)

            # Initialize variables
            self.session.run(tf.initialize_all_variables())

        # Finalize graph and log it if requested
        self.session.graph.finalize()
        if self.summary_writer:
            self.summary_writer.add_graph(self.session.graph)
Project: npfl114    Author: ufal    | project source | file source
def __init__(self, rnn_cell, rnn_cell_dim, method, words, logdir, expname, threads=1, seed=42):
        # Create an empty graph and a session
        graph = tf.Graph()
        graph.seed = seed
        self.session = tf.Session(graph = graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                                                                       intra_op_parallelism_threads=threads))

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
        self.summary_writer = tf.train.SummaryWriter("{}/{}-{}".format(logdir, timestamp, expname), flush_secs=10)

        # Construct the graph
        with self.session.graph.as_default():
            if rnn_cell == "LSTM":
                rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_cell_dim)
            elif rnn_cell == "GRU":
                rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_cell_dim)
            else:
                raise ValueError("Unknown rnn_cell {}".format(rnn_cell))

            self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name="global_step")
            self.sentence_lens = tf.placeholder(tf.int32, [None])
            self.forms = tf.placeholder(tf.int32, [None, None])
            self.tags = tf.placeholder(tf.int32, [None, None])

            # TODO
            # loss = ...
            # self.training = ...
            # self.predictions = ...
            # self.accuracy = ...

            self.dataset_name = tf.placeholder(tf.string, [])
            self.summary = tf.merge_summary([tf.scalar_summary(self.dataset_name+"/loss", loss),
                                             tf.scalar_summary(self.dataset_name+"/accuracy", self.accuracy)])

            # Initialize variables
            self.session.run(tf.initialize_all_variables())
Project: npfl114    Author: ufal    | project source | file source
def __init__(self, logdir, experiment, threads):
        # Construct the graph
        with tf.name_scope("inputs"):
            self.images = tf.placeholder(tf.float32, [None, WIDTH, HEIGHT, 1], name="images")
            self.labels = tf.placeholder(tf.int64, [None], name="labels")
            flattened_images = layers.flatten(self.images)

        hidden_layer = layers.fully_connected(flattened_images, num_outputs=HIDDEN, activation_fn=tf.nn.relu, scope="hidden_layer")
        output_layer = layers.fully_connected(hidden_layer, num_outputs=LABELS, activation_fn=None, scope="output_layer")

        loss = losses.sparse_softmax_cross_entropy(output_layer, self.labels, scope="loss")
        self.training = layers.optimize_loss(loss, None, None, tf.train.AdamOptimizer(), summaries=['loss', 'gradients', 'gradient_norm'], name='training')

        with tf.name_scope("accuracy"):
            predictions = tf.argmax(output_layer, 1, name="predictions")
            accuracy = metrics.accuracy(predictions, self.labels)
            tf.scalar_summary("training/accuracy", accuracy)

        with tf.name_scope("confusion_matrix"):
            confusion_matrix = metrics.confusion_matrix(predictions, self.labels, weights=tf.not_equal(predictions, self.labels), dtype=tf.float32)
            confusion_image = tf.reshape(confusion_matrix, [1, LABELS, LABELS, 1])

        # Summaries
        self.summaries = {'training': tf.merge_all_summaries() }
        for dataset in ["dev", "test"]:
            self.summaries[dataset] = tf.merge_summary([tf.scalar_summary(dataset + "/accuracy", accuracy),
                                                        tf.image_summary(dataset + "/confusion_matrix", confusion_image)])

        # Create the session
        self.session = tf.Session(config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                                                        intra_op_parallelism_threads=threads))

        self.session.run(tf.initialize_all_variables())
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
        self.summary_writer = tf.train.SummaryWriter("{}/{}-{}".format(logdir, timestamp, experiment), graph=self.session.graph, flush_secs=10)
        self.steps = 0
Project: npfl114    Author: ufal    | project source | file source
def __init__(self, rnn_cell, rnn_cell_dim, num_chars, bow_char, eow_char, logdir, expname, threads=1, seed=42):
        # Create an empty graph and a session
        graph = tf.Graph()
        graph.seed = seed
        self.session = tf.Session(graph = graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                                                                       intra_op_parallelism_threads=threads))

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
        self.summary_writer = tf.train.SummaryWriter("{}/{}-{}".format(logdir, timestamp, expname), flush_secs=10)

        # Construct the graph
        with self.session.graph.as_default():
            if rnn_cell == "LSTM":
                rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_cell_dim)
            elif rnn_cell == "GRU":
                rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_cell_dim)
            else:
                raise ValueError("Unknown rnn_cell {}".format(rnn_cell))

            self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name="global_step")
            self.sentence_lens = tf.placeholder(tf.int32, [None])
            self.form_ids = tf.placeholder(tf.int32, [None, None])
            self.forms = tf.placeholder(tf.int32, [None, None])
            self.form_lens = tf.placeholder(tf.int32, [None])
            self.lemma_ids = tf.placeholder(tf.int32, [None, None])
            self.lemmas = tf.placeholder(tf.int32, [None, None])
            self.lemma_lens = tf.placeholder(tf.int32, [None])

            # TODO
            # loss = ...
            # self.training = ...
            # self.predictions = ...
            # self.accuracy = ...

            self.dataset_name = tf.placeholder(tf.string, [])
            self.summary = tf.merge_summary([tf.scalar_summary(self.dataset_name+"/loss", loss),
                                             tf.scalar_summary(self.dataset_name+"/accuracy", self.accuracy)])

            # Initialize variables
            self.session.run(tf.initialize_all_variables())
Project: NMTS    Author: Hari-Sheng    | project source | file source
def build_other_helpers(self):
        self.saver = tf.train.Saver(tf.trainable_variables())
        #self.summarizer = tf.merge_all_summaries() 
        self.summarizer = tf.merge_summary([tf.scalar_summary("Learning Rate", self.learning_rate),
                                            tf.scalar_summary("Training Loss", self.loss)])

        self.test_summarizer = tf.merge_summary([tf.scalar_summary("Validation Loss", self.valid_loss), 
                                                 tf.scalar_summary("BLEU", self.bleu)])
        self.writer = tf.train.SummaryWriter("./logs/{}".format(self.log_name.eval()),
                     self.sess.graph)
Project: personalized-dialog    Author: chaitjo    | project source | file source
def _init_summaries(self):
        self.accuracy = tf.placeholder_with_default(0.0, shape=(), name='Accuracy')
        self.accuracy_summary = tf.scalar_summary('Accuracy summary', self.accuracy)

        self.f_pos_summary = tf.histogram_summary('f_pos', self.f_pos)
        self.f_neg_summary = tf.histogram_summary('f_neg', self.f_neg)

        self.loss_summary = tf.scalar_summary('Mini-batch loss', self.loss)
        self.summary_op = tf.merge_summary(
            [
                self.f_pos_summary,
                self.f_neg_summary,
                self.loss_summary
            ]
        )
Project: icnn    Author: locuslab    | project source | file source
def __init__(self, inputs, outputs, summary_ops=None, summary_writer=None, session=None):
        self._inputs = inputs if type(inputs) == list else [inputs]
        self._outputs = outputs
        self._summary_op = tf.merge_summary(summary_ops) if type(summary_ops) == list else summary_ops
        self._session = session or tf.get_default_session()
        self._writer = summary_writer
Project: icnn    Author: locuslab    | project source | file source
def __init__(self, inputs, outputs, summary_ops=None, summary_writer=None, session=None):
        self._inputs = inputs if type(inputs) == list else [inputs]
        self._outputs = outputs
        # self._summary_op = tf.merge_summary(summary_ops) if type(summary_ops) == list else summary_ops
        self._summary_op = tf.merge_summary(summary_ops) if type(summary_ops) == list else summary_ops
        self._session = session or tf.get_default_session()
        self._writer = summary_writer
Project: icnn    Author: locuslab    | project source | file source
def __init__(self, inputs, outputs, summary_ops=None, summary_writer=None, session=None):
        self._inputs = inputs if type(inputs) == list else [inputs]
        self._outputs = outputs
        self._summary_op = tf.merge_summary(summary_ops) if type(summary_ops) == list else summary_ops
        self._session = session or tf.get_default_session()
        self._writer = summary_writer
Project: stance-conditional    Author: sheffieldnlp    | project source | file source
def update_summary(self, sess, current_step, title, value):
        cur_summary = tf.scalar_summary(title, value)
        merged_summary_op = tf.merge_summary([cur_summary])  # if you are using some summaries, merge them
        summary_str = sess.run(merged_summary_op)
        self.summary_writer.add_summary(summary_str, current_step)
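
As written, every call adds a fresh scalar_summary and merge_summary op to the graph, so the graph grows with each logged step. A common alternative, sketched here against the same TF 0.x API (the class and attribute names are illustrative, not from the project), is to build the summary op once with a placeholder and feed it new values on each call:

import tensorflow as tf

class ScalarLogger(object):
    def __init__(self, sess, summary_writer, title):
        self.sess = sess
        self.summary_writer = summary_writer
        # Built once and reused, instead of growing the graph per update.
        self.value_pl = tf.placeholder(tf.float32, [])
        self.summary_op = tf.scalar_summary(title, self.value_pl)

    def update_summary(self, current_step, value):
        summary_str = self.sess.run(self.summary_op,
                                    feed_dict={self.value_pl: value})
        self.summary_writer.add_summary(summary_str, current_step)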
Project: RNN_Text_Classify    Author: luchi007    | project source | file source
def train_step():

    print("loading the dataset...")
    config = Config()
    eval_config=Config()
    eval_config.keep_prob=1.0

    train_data,valid_data,test_data=data_helper.load_data(FLAGS.max_len,batch_size=config.batch_size)

    print("begin training")

    # gpu_config=tf.ConfigProto()
    # gpu_config.gpu_options.allow_growth=True
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-1*FLAGS.init_scale,1*FLAGS.init_scale)
        with tf.variable_scope("model",reuse=None,initializer=initializer):
            model = RNN_Model(config=config,is_training=True)

        with tf.variable_scope("model",reuse=True,initializer=initializer):
            valid_model = RNN_Model(config=eval_config,is_training=False)
            test_model = RNN_Model(config=eval_config,is_training=False)

        # Add summaries
        # train_summary_op = tf.merge_summary([model.loss_summary,model.accuracy])
        train_summary_dir = os.path.join(config.out_dir,"summaries","train")
        train_summary_writer =  tf.train.SummaryWriter(train_summary_dir,session.graph)

        # dev_summary_op = tf.merge_summary([valid_model.loss_summary,valid_model.accuracy])
        dev_summary_dir = os.path.join(eval_config.out_dir,"summaries","dev")
        dev_summary_writer =  tf.train.SummaryWriter(dev_summary_dir,session.graph)

        # Add checkpointing
        checkpoint_dir = os.path.abspath(os.path.join(config.out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        saver = tf.train.Saver(tf.all_variables())


        tf.initialize_all_variables().run()
        global_steps=1
        begin_time=int(time.time())

        for i in range(config.num_epoch):
            print("the %d epoch training..."%(i+1))
            lr_decay = config.lr_decay ** max(i-config.max_decay_epoch,0.0)
            model.assign_new_lr(session,config.lr*lr_decay)
            global_steps=run_epoch(model,session,train_data,global_steps,valid_model,valid_data,train_summary_writer,dev_summary_writer)

            if i% config.checkpoint_every==0:
                path = saver.save(session,checkpoint_prefix,global_steps)
                print("Saved model chechpoint to{}\n".format(path))

        print("the train is finished")
        end_time=int(time.time())
        print("training takes %d seconds already\n"%(end_time-begin_time))
        test_accuracy=evaluate(test_model,session,test_data)
        print("the test data accuracy is %f"%test_accuracy)
        print("program end!")
Project: bi-att-flow    Author: allenai    | project source | file source
def __init__(self, config, scope, rep=True):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, None, None], name='x')
        self.cx = tf.placeholder('int32', [N, None, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, None], name='q')
        self.cq = tf.placeholder('int32', [N, None, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, None], name='q_mask')
        self.y = tf.placeholder('bool', [N, None, None], name='y')
        self.y2 = tf.placeholder('bool', [N, None, None], name='y2')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        self.var_ema = None
        if rep:
            self._build_var_ema()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
Project: Adversarial_Video_Generation    Author: dyelax    | project source | file source
def define_graph(self):
        """
        Sets up the model graph in TensorFlow.
        """
        with tf.name_scope('discriminator'):
            ##
            # Setup scale networks. Each will make the predictions for images at a given scale.
            ##

            self.scale_nets = []
            for scale_num in xrange(self.num_scale_nets):
                with tf.name_scope('scale_net_' + str(scale_num)):
                    scale_factor = 1. / 2 ** ((self.num_scale_nets - 1) - scale_num)
                    self.scale_nets.append(DScaleModel(scale_num,
                                                       int(self.height * scale_factor),
                                                       int(self.width * scale_factor),
                                                       self.scale_conv_layer_fms[scale_num],
                                                       self.scale_kernel_sizes[scale_num],
                                                       self.scale_fc_layer_sizes[scale_num]))

            # A list of the prediction tensors for each scale network
            self.scale_preds = []
            for scale_num in xrange(self.num_scale_nets):
                self.scale_preds.append(self.scale_nets[scale_num].preds)

            ##
            # Data
            ##

            self.labels = tf.placeholder(tf.float32, shape=[None, 1], name='labels')

            ##
            # Training
            ##

            with tf.name_scope('training'):
                # global loss is the combined loss from every scale network
                self.global_loss = adv_loss(self.scale_preds, self.labels)
                self.global_step = tf.Variable(0, trainable=False, name='global_step')
                self.optimizer = tf.train.GradientDescentOptimizer(c.LRATE_D, name='optimizer')
                self.train_op = self.optimizer.minimize(self.global_loss,
                                                        global_step=self.global_step,
                                                        name='train_op')

                # add summaries to visualize in TensorBoard
                loss_summary = tf.scalar_summary('loss_D', self.global_loss)
                self.summaries = tf.merge_summary([loss_summary])
Project: Chinese-QA    Author: distantJing    | project source | file source
def __init__(self, config, scope, rep=True):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, None, None], name='x')
        self.cx = tf.placeholder('int32', [N, None, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, None], name='q')
        self.cq = tf.placeholder('int32', [N, None, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, None], name='q_mask')
        self.y = tf.placeholder('bool', [N, None, None], name='y')
        self.y2 = tf.placeholder('bool', [N, None, None], name='y2')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        self.var_ema = None
        if rep:
            self._build_var_ema()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
Project: dist-dqn    Author: viswanathgs    | project source | file source
def _init_network(self, config):
    # Placeholders
    self.x_placeholder = tf.placeholder(tf.float32, [None] + self.input_shape)
    self.q_placeholder = tf.placeholder(tf.float32, [None])
    self.action_placeholder = tf.placeholder(tf.float32, 
                                             [None, self.num_actions])

    summaries = []

    # Params and layers
    with tf.device(self.ps_device):
      params = self._init_params(
        config,
        input_shape=self.input_shape,
        output_size=self.num_actions,
        summaries=summaries,
      )
    self.q_output, reg_loss = self._init_layers(
      config,
      inputs=self.x_placeholder,
      params=params,
      summaries=summaries,
    )

    # Loss and training
    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    loss = self._init_loss(
      config,
      q=self.q_output,
      expected_q=self.q_placeholder,
      actions=self.action_placeholder,
      reg_loss=reg_loss,
      summaries=summaries,
    )
    self.train_op = self._init_optimizer(
      config,
      params=params,
      loss=loss,
      num_replicas=self.num_replicas,
      global_step=self.global_step,
      summaries=summaries,
    )

    # Target network
    self.target_q_output, self.target_update_ops = self._init_target_network(
      config,
      inputs=self.x_placeholder,
      input_shape=self.input_shape,
      output_size=self.num_actions,
      params=params,
      ps_device=self.ps_device,
      worker_device=self.worker_device,
      summaries=summaries,
    )

    # Merge all the summaries in this graph
    if summaries:
      self.summary_op = tf.merge_summary(summaries)
Project: pred225    Author: kanoh-k    | project source | file source
def train(self, eval_on_test=False):
        """ Train model and save it to file.

        Train model with given hidden layers. Training data is created
        by prepare_training_data(), which must be called before this function.
        """
        tf.reset_default_graph()
        with tf.Session() as sess:
            feature_data = tf.placeholder("float", [None, self.num_predictors])
            labels = tf.placeholder("float", [None, self.num_classes])

            layers = [self.num_predictors] + self.hidden_layers + [self.num_classes]
            model = self.inference(feature_data, layers)
            cost, cost_summary_op = self.loss(model, labels)
            training_op = self.training(cost, learning_rate=0.0001)

            correct_prediction = tf.equal(tf.argmax(model, 1), tf.argmax(labels, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

            # Merge all variable summaries and save the results to log file
            # summary_op = tf.merge_all_summaries()
            accuracy_op_train = tf.scalar_summary("Accuracy on Train", accuracy)
            summary_op_train = tf.merge_summary([cost_summary_op, accuracy_op_train])
            if eval_on_test:
                accuracy_op_test = tf.scalar_summary("Accuracy on Test", accuracy)
                summary_op_test = tf.merge_summary([accuracy_op_test])

            summary_writer = tf.train.SummaryWriter(self.log_dir + self.model_name, sess.graph)

            train_dict = {
                feature_data: self.training_predictors_tf.values,
                labels: self.training_classes_tf.values.reshape(len(self.training_classes_tf.values), self.num_classes)}

            if eval_on_test:
                test_dict = {
                    feature_data: self.test_predictors_tf.values,
                    labels: self.test_classes_tf.values.reshape(len(self.test_classes_tf.values), self.num_classes)}

            init = tf.initialize_all_variables()
            sess.run(init)

            for i in range(1, self.max_iteration):
                sess.run(training_op, feed_dict=train_dict)

                # Write summary to log
                if i % 100 == 0:
                    summary_str = sess.run(summary_op_train, feed_dict=train_dict)
                    summary_writer.add_summary(summary_str, i)
                    if eval_on_test:
                        summary_str = sess.run(summary_op_test, feed_dict=test_dict)
                        summary_writer.add_summary(summary_str, i)
                    summary_writer.flush()

                # Print current accuracy to console
                if i%5000 == 0:
                    print (i, sess.run(accuracy, feed_dict=train_dict))

            # Save trained parameters
            saver = tf.train.Saver()
            saver.save(sess, self.model_filename)
Project: adversarial-squad    Author: robinjia    | project source | file source
def __init__(self, config, scope, rep=True):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, None, None], name='x')
        self.cx = tf.placeholder('int32', [N, None, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, None], name='q')
        self.cq = tf.placeholder('int32', [N, None, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, None], name='q_mask')
        self.y = tf.placeholder('bool', [N, None, None], name='y')
        self.y2 = tf.placeholder('bool', [N, None, None], name='y2')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        self.var_ema = None
        if rep:
            self._build_var_ema()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
Project: USF    Author: lucaswiser    | project source | file source
def main(graph_path, Model, stream, validstream, continue_training=False, 
        start_model=None, start_ind=0, save_every=1):
    """Run a complete training session. Will load a saved model to continue training
    if provided. After every epoch the current model will be saved, and the tensorboard
    will graph new data.
    """  
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-Config.init_scale,
                                                     Config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = Model(config=Config)

        tf.initialize_all_variables().run()
        saver = tf.train.Saver(max_to_keep=Config.num_models)
        if continue_training:
            print("Continuing training from saved model ",start_model)
            saver.restore(session,start_model)
        writer = tf.train.SummaryWriter(graph_path, max_queue=3) 
        last3 = []
        learning_rate = Config.learning_rate
        session.run(tf.assign(m.lr, learning_rate))
        tol = 0.001
        for i in range(start_ind, start_ind+Config.num_epochs):
            print("EPOCH: %s"%i)
            print("learning_rate: %s"%learning_rate)
            epoch_cost, median_cost, max_cost = m.run_epoch(session, stream.get_sents(), True)   
            print("Total cost for EPOCH: %s"%i)
            print(epoch_cost)
            print("Median cost: %s"%median_cost)
            print("Max cost: %s"%max_cost)
            accuracy = m.run_epoch(session, validstream.get_sents(), False)
            print("accuracy: %s"%accuracy)
            summ1 = tf.scalar_summary("epoch_cost", tf.constant(epoch_cost))
            summ2 = tf.scalar_summary("median_cost", tf.constant(median_cost))
            summ3 = tf.scalar_summary("max_cost", tf.constant(max_cost))
            summ4 = tf.scalar_summary("learning_rate", tf.constant(learning_rate))
            summ5 = tf.scalar_summary("accuracy", tf.constant(accuracy))
            merge = tf.merge_summary([summ1, summ2, summ3, summ4, summ5])
            writer.add_summary(merge.eval(), i)
            if i % save_every == 0:
                saver.save(session, model_dir + 'saved-lstm-model', global_step=i)
            if len(last3) == 3:
                h = max(last3)
                if last3[2] == h:
                    learning_rate = learning_rate/2
                    session.run(tf.assign(m.lr, learning_rate))
                elif last3[1] == h:
                    if (last3[1] - last3[2])/last3[1] < tol:
                        learning_rate = learning_rate/2
                        session.run(tf.assign(m.lr, learning_rate))
                else:
                    if (h - min(last3))/h < tol:
                        learning_rate = learning_rate/2
                        session.run(tf.assign(m.lr, learning_rate))
                last3 = last3[1:] + [median_cost]
            elif len(last3) < 3:
                last3 = last3 + [median_cost]
            else:
                raise Exception
Project: npfl114    Author: ufal    | project source | file source
def construct(self, hidden_layer_size, saver=False):
        with self.session.graph.as_default():
            with tf.name_scope("inputs"):
                self.images = tf.placeholder(tf.float32, [None, self.WIDTH, self.HEIGHT, 1], name="images")
                self.labels = tf.placeholder(tf.int64, [None], name="labels")

            flattened_images = tf_layers.flatten(self.images, scope="preprocessing")
            hidden_layer = tf_layers.fully_connected(flattened_images, num_outputs=hidden_layer_size, activation_fn=tf.nn.relu, scope="hidden_layer")
            output_layer = tf_layers.fully_connected(hidden_layer, num_outputs=self.LABELS, activation_fn=None, scope="output_layer")
            self.predictions = tf.argmax(output_layer, 1)

            loss = tf_losses.sparse_softmax_cross_entropy(output_layer, self.labels, scope="loss")
            self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name="global_step")
            self.training = tf.train.AdamOptimizer().minimize(loss, global_step=self.global_step)
            self.accuracy = tf_metrics.accuracy(self.predictions, self.labels)

            # Summaries
            self.summaries = {"training": tf.merge_summary([tf.scalar_summary("train/loss", loss),
                                                            tf.scalar_summary("train/accuracy", self.accuracy)])}
            for dataset in ["dev", "test"]:
                self.summaries[dataset] = tf.scalar_summary(dataset+"/accuracy", self.accuracy)

            # Construct saver if requested
            if saver:
                tf.add_to_collection("public_ops", self.images)
                tf.add_to_collection("public_ops", self.labels)
                tf.add_to_collection("public_ops", self.predictions)
                tf.add_to_collection("public_ops", self.global_step)
                tf.add_to_collection("public_ops", self.training)
                tf.add_to_collection("public_ops", self.accuracy)
                tf.add_to_collection("public_ops", self.summaries["training"])
                tf.add_to_collection("public_ops", self.summaries["dev"])
                tf.add_to_collection("public_ops", self.summaries["test"])
                self.saver = tf.train.Saver(max_to_keep=None)
            else:
                self.saver = None

            # Initialize variables
            self.session.run(tf.initialize_all_variables())

        # Finalize graph and log it if requested
        self.session.graph.finalize()
        if self.summary_writer:
            self.summary_writer.add_graph(self.session.graph)

    # Load the variables and optionally also the graph (in that case it must be empty)
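For context, a sketch of how the `self.summaries` dict above is typically consumed during evaluation; the `evaluate` helper and the batch argument names are assumptions, not code from the repository:

def evaluate(self, dataset, images, labels):
    # dataset is "dev" or "test"; images/labels are numpy batches.
    summary, acc = self.session.run(
        [self.summaries[dataset], self.accuracy],
        {self.images: images, self.labels: labels})
    if self.summary_writer:
        self.summary_writer.add_summary(summary, self.session.run(self.global_step))
    return acc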
项目:thinstack-rl    作者:hans    | 项目源码 | 文件源码
def build_graphs(model_fn, buckets):
    graphs = {}
    global_step = tf.Variable(0, trainable=False, name="global_step")
    learning_rate = tf.placeholder(tf.float32, (), name="learning_rate")
    is_training = tf.placeholder(tf.bool, (), name="is_training")
    opt = tf.train.RMSPropOptimizer(learning_rate)

    for i, num_timesteps in enumerate(buckets):
        summaries_so_far = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        with tf.variable_scope("train/", reuse=i > 0):
            stacks, logits, ys, gradients = model_fn(num_timesteps,
                                                     is_training=is_training)

            if FLAGS.histogram_summaries:
                # Set up histogram displays
                params = set()
                for gradient, param in gradients:
                    params.add(param)
                    if gradient is not None:
                        tf.histogram_summary(gradient.name + "b%i" % num_timesteps, gradient)
                for param in params:
                    tf.histogram_summary(param.name + "b%i" % num_timesteps, param)

            # Clip gradients.
            clipped_gradients, norm = tf.clip_by_global_norm(
                    [grad for grad, param in gradients], FLAGS.grad_clip)
            clipped_gradients = zip(clipped_gradients,
                                    [param for _, param in gradients])
            tf.scalar_summary("norm_%i" % num_timesteps, norm)

            train_op = opt.apply_gradients(clipped_gradients, global_step)

            new_summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
            new_summary_ops = set(new_summary_ops) - summaries_so_far
            summary_op = tf.merge_summary(list(new_summary_ops))

            graphs[num_timesteps] = Graph(stacks, logits, ys, clipped_gradients,
                                          num_timesteps, learning_rate,
                                          train_op, summary_op,
                                          is_training)

    return graphs, global_step
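The `summaries_so_far` set difference is what keeps each bucket's `summary_op` from re-merging summaries created for earlier buckets. The same pattern in isolation, with an illustrative `loss` tensor:

before = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
tf.scalar_summary("bucket_loss", loss)  # created while building one bucket
new_ops = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) - before
bucket_summary_op = tf.merge_summary(list(new_ops))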
项目:unrolled-GAN    作者:Zardinality    | 项目源码 | 文件源码
def main():
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    with tf.device('/gpu:1'):    
        g_loss_sum, d_loss_sum, img_sum, opt_g, opt_d, z, real_data = build_graph()
    summary_g = tf.merge_summary([g_loss_sum, img_sum])
    summary_d = tf.merge_summary([d_loss_sum, img_sum])
    saver = tf.train.Saver()
    npad = ((0, 0), (2, 2), (2, 2))
    with tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True)) as sess:
        sess.run(tf.initialize_all_variables())
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph)
        for i in xrange(FLAGS.max_iter_step):
            train_data = mnist.train.next_batch(FLAGS.batch_size)
            train_img = np.reshape(train_data[0], (-1, 28, 28))
            train_img = np.pad(train_img, pad_width=npad,
                               mode='constant', constant_values=0)
            train_img = np.expand_dims(train_img, -1)
            batch_z = np.random.uniform(-1, 1, [FLAGS.batch_size, FLAGS.z_dim]) \
                .astype(np.float32)
            feed_dict = {real_data[0]: train_img, z: batch_z, real_data[1]:train_data[1]}
            if i % 100 == 99:
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                _, merged = sess.run([opt_g, summary_g], feed_dict=feed_dict,
                                     options=run_options, run_metadata=run_metadata)
                summary_writer.add_summary(merged, i)
                summary_writer.add_run_metadata(
                    run_metadata, 'generator_metadata {}'.format(i), i)
                _, merged = sess.run([opt_g, summary_g], feed_dict=feed_dict,
                                     options=run_options, run_metadata=run_metadata)
                summary_writer.add_summary(merged, i)
                summary_writer.add_run_metadata(
                    run_metadata, 'second_generator_metadata {}'.format(i), i)
                _, merged = sess.run([opt_d, summary_d], feed_dict=feed_dict,
                                     options=run_options, run_metadata=run_metadata)
                summary_writer.add_summary(merged, i)
                summary_writer.add_run_metadata(
                    run_metadata, 'discriminator_metadata {}'.format(i), i)
            else:
                sess.run(opt_g, feed_dict=feed_dict)
                sess.run(opt_g, feed_dict=feed_dict)
                sess.run(opt_d, feed_dict=feed_dict)
            if i % 1000 == 999:
                saver.save(sess, os.path.join(
                    FLAGS.ckpt_dir, "model.ckpt"), global_step=i)
项目:pred_finance    作者:jjasonn0717    | 项目源码 | 文件源码
def createOP(self):
        ## cost function and training method ##
        with self.sess.graph.as_default():
            self.global_step = tf.Variable(0, name="global_step", trainable=False)
            with tf.name_scope('train'):
                self.in_action = tf.placeholder("float", [None, ACTIONS], 'input_action')
                self.targetQ = tf.placeholder("float", [None], 'input_Q')

                l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.get_collection("L2_VARIABLES")]) # l2 loss
                print([var.name for var in tf.get_collection("L2_VARIABLES")])

                readout_actionQ = tf.reduce_sum(tf.mul(self.readout, self.in_action), reduction_indices=1) # Q value of chosen action
                self.cost = tf.reduce_mean(tf.square(self.targetQ - readout_actionQ))# + 0.001*l2_loss # total cost

                # training op #
                start_l_rate = 0.0005
                decay_step = 100000
                decay_rate = 0.5
                learning_rate = tf.train.exponential_decay(start_l_rate, self.global_step, decay_step, decay_rate, staircase=False)
                grad_op = tf.train.AdamOptimizer(learning_rate=learning_rate)
                self.train_step = tf.contrib.layers.optimize_loss(loss=self.cost, 
                                                             global_step=self.global_step, 
                                                             learning_rate=0.0005, 
                                                             optimizer=grad_op, 
                                                             clip_gradients=1)
                tf.scalar_summary('learning_rate', learning_rate)
                tf.scalar_summary('l2_loss', l2_loss)

        ## summary and init ops ##
        with self.sess.graph.as_default():
            self.merged = tf.merge_all_summaries()
            self.init_op = tf.initialize_all_variables()

        ## record op ##
        with self.g_record.as_default():

            with tf.name_scope('record'):
                self.close_price = tf.placeholder('float', name='close_price')
                self.action_Qval = tf.placeholder('float', [ACTIONS], 'action_Qval')
                self.rec_revenue = tf.placeholder('float', name='revenue')

                close_price_summ = tf.scalar_summary('close_price', self.close_price)
                buy_Qval_summ = tf.scalar_summary('buy_Qval', self.action_Qval[0])
                sell_Qval_summ = tf.scalar_summary('sell_Qval', self.action_Qval[1])
                rec_revenue_summ = tf.scalar_summary('revenue', self.rec_revenue)

            self.merged_record = tf.merge_summary([close_price_summ, buy_Qval_summ, sell_Qval_summ, rec_revenue_summ])

        ## test acc op ##
        with self.g_record.as_default():

            with tf.name_scope('accuracy'):
                self.testAcc = tf.placeholder('float', name='accuracy')
                self.testRev = tf.placeholder('float', name='test_revenue')

                testAcc_summ = tf.scalar_summary('accuracy', self.testAcc)
                testRev_summ = tf.scalar_summary('test_revenue', self.testRev)
            self.merged_test = tf.merge_summary([testAcc_summ, testRev_summ])
项目:pred_finance    作者:jjasonn0717    | 项目源码 | 文件源码
def createOP(self):
        ## define the cost function ##
        with self.sess.graph.as_default():
            self.global_step = tf.Variable(0, name="global_step", trainable=False)
            with tf.name_scope('train'):
                self.in_action = tf.placeholder("float", [None, ACTIONS], 'input_action')
                self.targetQ   = tf.placeholder("float", [None], 'input_Q')
                self.sample_IS = tf.placeholder("float", [None], 'IS')

                l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()]) ## l2 loss

                readout_actionQ = tf.reduce_sum(tf.mul(self.readout, self.in_action), reduction_indices=1) ## Q value of chosen action
                self.TDErr = self.targetQ - readout_actionQ
                self.cost = tf.reduce_mean(tf.mul(tf.square(self.TDErr), self.sample_IS)) + 0.005*l2_loss ## total cost

                # training op #
                start_l_rate = 0.00025
                decay_step = 100000
                decay_rate = 0.5
                learning_rate = tf.train.exponential_decay(start_l_rate, self.global_step, decay_step, decay_rate, staircase=False)
                grad_op = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
                self.train_step = tf.contrib.layers.optimize_loss(loss=self.cost, 
                                                             global_step=self.global_step, 
                                                             learning_rate=0.00025, 
                                                             optimizer=grad_op, 
                                                             clip_gradients=1)
                tf.scalar_summary('learning_rate', learning_rate)
                tf.scalar_summary('l2_loss', l2_loss)

        ## summary and init ops ##
        with self.sess.graph.as_default():
            self.merged = tf.merge_all_summaries()
            self.init_op = tf.initialize_all_variables()

        ## record op ##
        with self.g_record.as_default():

            with tf.name_scope('record'):
                self.close_price = tf.placeholder('float', name='close_price')
                self.action_Qval = tf.placeholder('float', [ACTIONS], 'action_Qval')
                self.rec_revenue = tf.placeholder('float', name='revenue')

                close_price_summ = tf.scalar_summary('close_price', self.close_price)
                buy_Qval_summ = tf.scalar_summary('buy_Qval', self.action_Qval[0])
                sell_Qval_summ = tf.scalar_summary('sell_Qval', self.action_Qval[1])
                rec_revenue_summ = tf.scalar_summary('revenue', self.rec_revenue)

            self.merged_record = tf.merge_summary([close_price_summ, buy_Qval_summ, sell_Qval_summ, rec_revenue_summ])

        ## test acc op ##
        with self.g_record.as_default():

            with tf.name_scope('accuracy'):
                self.testAcc = tf.placeholder('float', name='accuracy')
                self.testRev = tf.placeholder('float', name='test_revenue')

                testAcc_summ = tf.scalar_summary('accuracy', self.testAcc)
                testRev_summ = tf.scalar_summary('test_revenue', self.testRev)
            self.merged_test = tf.merge_summary([testAcc_summ, testRev_summ])
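A sketch of how `merged_record` would typically be evaluated on the record graph; the session and writer names (`sess_record`, `record_writer`) and the fed value variables are assumptions:

summary_str = sess_record.run(
    self.merged_record,
    {self.close_price: price,      # scalar float
     self.action_Qval: qvals,      # length-ACTIONS array
     self.rec_revenue: revenue})   # scalar float
record_writer.add_summary(summary_str, step)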
项目:rascal-tensorflow    作者:stayrascal    | 项目源码 | 文件源码
def define_model(self):
        # Training computation
        print('Start using the model with train samples')
        logits = self.model(self.tf_train_samples)

        with tf.name_scope('loss'):
            # Sum the per-digit cross-entropy losses over the six output heads.
            self.loss = tf.add_n(
                [tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits[d], self.tf_train_labels[:, d])) for d in range(6)],
                name="loss")
            # self.loss += self.apply_regularization(_lambda=5e-4)
            # Add scalar summary for cost
            self.train_summaries.append(tf.scalar_summary('Loss', self.loss))

        # Optimizer
        with tf.name_scope('optimizer'):
            # learning rate decay
            global_step = tf.Variable(0, trainable=False)  # keep the step counter out of the trainable set
            learning_rate = tf.train.exponential_decay(
                learning_rate=self.base_learning_rate,
                global_step=global_step * self.train_batch_size,
                decay_steps=100,
                decay_rate=self.decay_rate,
                staircase=True,
                name='learning_rate'
            )
            # Pass global_step so each training step advances the decay schedule.
            if (self.optimize_method == 'gradient'):
                self.optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(self.loss, global_step=global_step)
            elif (self.optimize_method == 'momentum'):
                self.optimizer = tf.train.MomentumOptimizer(learning_rate, 0.5).minimize(self.loss, global_step=global_step)
            elif (self.optimize_method == 'adam'):
                self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.loss, global_step=global_step)

        # Predictions for the training, and test data.
        with tf.name_scope('train'):
            self.train_prediction = tf.pack(logits)
        with tf.name_scope('test'):
            self.test_prediction = self.softmax_combine(self.tf_test_samples, train=False)

        self.merged_train_summary = tf.merge_summary(self.train_summaries)
        self.merged_test_summary = tf.merge_summary(self.test_summaries)

        self.saver = tf.train.Saver(tf.all_variables())
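For completeness, a hedged sketch of a matching training step that writes the merged training summaries; `session`, `writer`, `step`, and the batch names are assumptions:

_, loss_value, summary = session.run(
    [self.optimizer, self.loss, self.merged_train_summary],
    feed_dict={self.tf_train_samples: batch_samples,
               self.tf_train_labels: batch_labels})
writer.add_summary(summary, step)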