Python tensorflow module: add_to_collection() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.add_to_collection().

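Before the project examples, a minimal self-contained sketch of the core mechanism (TF 1.x graph mode; the collection key 'my_tensors' is made up for illustration): tf.add_to_collection appends a value under a string key in the default graph, and tf.get_collection returns everything stored under that key.

import tensorflow as tf

x = tf.constant([1.0, 2.0])
y = tf.nn.relu(x)
# Register the tensor under an arbitrary string key.
tf.add_to_collection('my_tensors', y)
# Anywhere else while building the same graph:
stored = tf.get_collection('my_tensors')  # -> [y]
with tf.Session() as sess:
    print(sess.run(stored[0]))  # [1. 2.]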
Project: acdc_segmenter | Author: baumgach
def get_weight_variable(shape, name=None, type='xavier_uniform', regularize=True, **kwargs):

    initialise_from_constant = False
    if type == 'xavier_uniform':
        initial = xavier_initializer(uniform=True, dtype=tf.float32)
    elif type == 'xavier_normal':
        initial = xavier_initializer(uniform=False, dtype=tf.float32)
    elif type == 'he_normal':
        initial = variance_scaling_initializer(uniform=False, factor=2.0, mode='FAN_IN', dtype=tf.float32)
    elif type == 'he_uniform':
        initial = variance_scaling_initializer(uniform=True, factor=2.0, mode='FAN_IN', dtype=tf.float32)
    elif type == 'caffe_uniform':
        initial = variance_scaling_initializer(uniform=True, factor=1.0, mode='FAN_IN', dtype=tf.float32)
    elif type == 'simple':
        stddev = kwargs.get('stddev', 0.02)
        initial = tf.truncated_normal(shape, stddev=stddev, dtype=tf.float32)
        initialise_from_constant = True
    elif type == 'bilinear':
        weights = _bilinear_upsample_weights(shape)
        initial = tf.constant(weights, shape=shape, dtype=tf.float32)
        initialise_from_constant = True
    else:
        raise ValueError('Unknown initialisation requested: %s' % type)

    if name is None:  # This keeps the option open to use unnamed Variables
        weight = tf.Variable(initial)
    else:
        if initialise_from_constant:
            weight = tf.get_variable(name, initializer=initial)
        else:
            weight = tf.get_variable(name, shape=shape, initializer=initial)

    if regularize:
        tf.add_to_collection('weight_variables', weight)

    return weight
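Registering each regularized weight under the custom key 'weight_variables' leaves the actual penalty to the training script. A hedged sketch of that consumer side (the 1e-4 decay factor and task_loss are illustrative, not from the project):

weights = tf.get_collection('weight_variables')
# Sum an L2 penalty over every registered weight (decay factor is hypothetical).
l2_term = 1e-4 * tf.add_n([tf.nn.l2_loss(w) for w in weights])
total_loss = task_loss + l2_term  # task_loss: the model's data term, defined elsewhere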
Project: human-rl | Author: gsastry
def model(self, features, labels):
        x = features["observation"]
        x = tf.contrib.layers.convolution2d(x, 2, kernel_size=[3, 3], stride=[2, 2], activation_fn=tf.nn.elu)
        x = tf.contrib.layers.convolution2d(x, 2, kernel_size=[3, 3], stride=[2, 2], activation_fn=tf.nn.elu)
        actions = tf.one_hot(tf.reshape(features["action"],[-1]), depth=6, on_value=1.0, off_value=0.0, axis=1)
        x = tf.concat([tf.contrib.layers.flatten(x), actions], 1)
        x = tf.contrib.layers.fully_connected(x, 100, activation_fn=tf.nn.elu)
        x = tf.contrib.layers.fully_connected(x, 100, activation_fn=tf.nn.elu)
        logits = tf.contrib.layers.fully_connected(x, 1, activation_fn=None)
        prediction = tf.sigmoid(logits, name="prediction")
        loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=tf.expand_dims(labels, axis=1)), name="loss")
        train_op = tf.contrib.layers.optimize_loss(
          loss, tf.contrib.framework.get_global_step(), optimizer='Adam',
          learning_rate=self.learning_rate)
        tf.add_to_collection('prediction', prediction)
        tf.add_to_collection('loss', loss)
        return prediction, loss, train_op
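Because prediction and loss are exported via collections, the trained model can be reloaded for inference without rebuilding the graph in code. A minimal sketch, assuming the model was saved to a checkpoint at the hypothetical path 'model.ckpt':

saver = tf.train.import_meta_graph('model.ckpt.meta')
with tf.Session() as sess:
    saver.restore(sess, 'model.ckpt')
    prediction = tf.get_collection('prediction')[0]
    # Run inference by feeding the graph's input placeholders:
    # sess.run(prediction, feed_dict={...})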
Project: kaggle-review | Author: daxiongshu
def _get_summary(self):
        # Build self.summ_op for TensorBoard.
        # Subclasses may override this function.
        if not self.flags.visualize or self.flags.visualize=='none':
            return
        summ_collection = "{} {} {} summaries".format(self.flags.comp, self.flags.sol, self.flags.run_name)
        if len(tf.get_collection(tf.GraphKeys.SCALARS)):
            self.scaler_op = tf.summary.merge(tf.get_collection(tf.GraphKeys.SCALARS))
        if len(tf.get_collection(tf.GraphKeys.IMAGES)):
            self.image_op = tf.summary.merge(tf.get_collection(tf.GraphKeys.IMAGES))

        for i in tf.get_collection(tf.GraphKeys.SCALARS):
            tf.add_to_collection(summ_collection, i)
        for i in tf.get_collection(tf.GraphKeys.WEIGHTS):
            tf.add_to_collection(summ_collection, i)
        for i in tf.get_collection(tf.GraphKeys.FEATURE_MAPS):
            tf.add_to_collection(summ_collection, i)
        for i in tf.get_collection(tf.GraphKeys.IMAGES):
            tf.add_to_collection(summ_collection, i)
        for i in tf.get_collection(tf.GraphKeys.GRADIENTS):
            tf.add_to_collection(summ_collection, i)
        for i in tf.get_collection(tf.GraphKeys.EMBEDDINGS):
            tf.add_to_collection(summ_collection, i)
        self.summ_op = tf.summary.merge(tf.get_collection(summ_collection))
Project: squeezeDet-hand | Author: fyhtea
def _variable_with_weight_decay(name, shape, wd, initializer, trainable=True):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.
    initializer: initializer for the Variable
    trainable: whether the Variable is trainable; weight decay is added only
        for trainable Variables

  Returns:
    Variable Tensor
  """
  var = _variable_on_device(name, shape, initializer, trainable)
  if wd is not None and trainable:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: youtube-8m | Author: wangheda
def highway(self, input_1, input_2, size_1, size_2, l2_penalty=1e-8, layer_size=1):
        output = input_2
        for idx in range(layer_size):
            with tf.name_scope('output_lin_%d' % idx):
                W = tf.Variable(tf.truncated_normal([size_2,size_1], stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[size_1]), name="b")
                tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(W))
                tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(b))
                output = tf.nn.relu(tf.nn.xw_plus_b(output,W,b))
            with tf.name_scope('transform_lin_%d' % idx):
                W = tf.Variable(tf.truncated_normal([size_1,size_1], stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[size_1]), name="b")
                tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(W))
                tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(b))
                transform_gate = tf.sigmoid(tf.nn.xw_plus_b(input_1,W,b))
            carry_gate = tf.constant(1.0) - transform_gate
            output = transform_gate * output + carry_gate * input_1
        return output
Project: youtube-8m | Author: wangheda
def conv_block(self, input, out_size, layer, kernalsize=3, l2_penalty=1e-8, shortcut=False):
        in_shape = input.get_shape().as_list()
        if layer>0:
            filter_shape = [kernalsize, 1, in_shape[3], out_size]
        else:
            filter_shape = [kernalsize, in_shape[2], 1, out_size]
        W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W-%s" % layer)
        b = tf.Variable(tf.constant(0.1, shape=[out_size]), name="b-%s" % layer)
        tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(W))
        tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(b))
        if layer>0:
            conv = tf.nn.conv2d(input, W, strides=[1, 1, 1, 1], padding="SAME", name="conv-%s" % layer)
        else:
            conv = tf.nn.conv2d(input, W, strides=[1, 1, 1, 1], padding="VALID", name="conv-%s" % layer)
        if shortcut:
            shortshape = [1,1,in_shape[3], out_size]
            Ws = tf.Variable(tf.truncated_normal(shortshape, stddev=0.05), name="Ws-%s" % layer)
            tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(Ws))
            conv = conv + tf.nn.conv2d(input, Ws, strides=[1, 1, 1, 1], padding="SAME", name="conv-shortcut-%s" % layer)
        h = tf.nn.bias_add(conv, b)
        h2 = tf.nn.relu(tf.contrib.layers.batch_norm(h, center=True, scale=True, epsilon=1e-5, decay=0.9), name="relu-%s" % layer)

        return h2
Project: youtube-8m | Author: wangheda
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):
        """Creates a logistic model.

        Args:
          model_input: 'batch' x 'num_features' matrix of input features.
          vocab_size: The number of classes in the dataset.

        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          batch_size x num_classes."""

        input_size = vocab_size
        output_size = FLAGS.hidden_size
        with tf.name_scope("rbm"):
            self.weights = tf.Variable(tf.truncated_normal([input_size, output_size],
                                    stddev=1.0 / math.sqrt(float(input_size))), name="weights")
            self.v_bias = tf.Variable(tf.zeros([input_size]), name="v_bias")
            self.h_bias = tf.Variable(tf.zeros([output_size]), name="h_bias")
            tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(self.weights))
            tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(self.v_bias))
            tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(self.h_bias))
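Penalties added under tf.GraphKeys.REGULARIZATION_LOSSES are not applied automatically; the training code has to collect and sum them itself. A minimal sketch (cross_entropy_loss stands in for whatever data term the caller builds):

reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
reg_term = tf.add_n(reg_losses) if reg_losses else tf.constant(0.0)
final_loss = cross_entropy_loss + reg_term  # data term defined by the caller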
Project: youtube-8m | Author: wangheda
def get_video_weights(video_id_batch):
  video_id_to_index = tf.contrib.lookup.string_to_index_table_from_file(
                          vocabulary_file=FLAGS.sample_vocab_file, default_value=0)
  indexes = video_id_to_index.lookup(video_id_batch)
  weights, length = get_video_weights_array()
  weights_input = tf.placeholder(tf.float32, shape=[length], name="sample_weights_input")
  weights_tensor = tf.get_variable("sample_weights",
                               shape=[length],
                               trainable=False,
                               dtype=tf.float32,
                               initializer=tf.constant_initializer(weights))
  weights_assignment = tf.assign(weights_tensor, weights_input)

  tf.add_to_collection("weights_input", weights_input)
  tf.add_to_collection("weights_assignment", weights_assignment)

  video_weight_batch = tf.nn.embedding_lookup(weights_tensor, indexes)
  return video_weight_batch
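Exporting weights_input and weights_assignment through collections lets later code, such as an evaluation loop that re-imports the graph, refresh the sample weights without holding Python references. A sketch of that consumer side (sess and the numpy array new_weights are assumed to exist):

weights_input = tf.get_collection('weights_input')[0]
weights_assignment = tf.get_collection('weights_assignment')[0]
sess.run(weights_assignment, feed_dict={weights_input: new_weights})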
Project: human-rl | Author: gsastry
def model(self, features, labels):
        x = features["observation"]
        x = tf.contrib.layers.convolution2d(x, 2, kernel_size=[3, 3], stride=[2, 2], activation_fn=tf.nn.elu)
        x = tf.contrib.layers.convolution2d(x, 2, kernel_size=[3, 3], stride=[2, 2], activation_fn=tf.nn.elu)
        x = tf.contrib.layers.flatten(x)
        x = tf.contrib.layers.fully_connected(x, 100, activation_fn=tf.nn.elu)
        x = tf.contrib.layers.fully_connected(x, 100, activation_fn=tf.nn.elu)
        logits = tf.contrib.layers.fully_connected(x, 1, activation_fn=None)
        prediction = tf.sigmoid(logits)
        loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=tf.expand_dims(labels, axis=1)))
        train_op = tf.contrib.layers.optimize_loss(
          loss, tf.contrib.framework.get_global_step(), optimizer='Adam',
          learning_rate=0.01)
        tf.add_to_collection('prediction', prediction)
        tf.add_to_collection('loss', loss)
        return prediction, loss, train_op
Project: ml | Author: hohoins
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: ml | Author: hohoins
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: dcan-tensorflow | Author: lisjin
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
    Returns:
        Variable Tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None and not tf.get_variable_scope().reuse:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: dcan-tensorflow | Author: lisjin
def _add_cross_entropy(labels, logits, pref):
    """Compute average cross entropy and add to loss collection.
    Args:
        labels: Single dimension labels from distorted_inputs() or inputs().
        logits: Output map from inference().
        pref: Either 'c' or 's', for contours or segments, respectively.
    """
    with tf.variable_scope('{}_cross_entropy'.format(pref)) as scope:
        class_prop = C_CLASS_PROP if pref == 'c' else S_CLASS_PROP
        weight_per_label = tf.scalar_mul(class_prop, tf.cast(tf.equal(labels, 0),
                                                             tf.float32)) + \
                           tf.scalar_mul(1.0 - class_prop, tf.cast(tf.equal(labels, 1),
                                                                   tf.float32))
        cross_entropy = tf.losses.sparse_softmax_cross_entropy(
            labels=tf.squeeze(labels, squeeze_dims=[3]), logits=logits)
        cross_entropy_weighted = tf.multiply(weight_per_label, cross_entropy)
        cross_entropy_mean = tf.reduce_mean(cross_entropy_weighted, name=scope.name)
        tf.add_to_collection('losses', cross_entropy_mean)
Project: hdrnet_legacy | Author: mgharbi
def inference(cls, lowres_input, fullres_input, params,
                is_training=False):

    with tf.variable_scope('coefficients'):
      bilateral_coeffs = cls._coefficients(lowres_input, params, is_training)
      tf.add_to_collection('bilateral_coefficients', bilateral_coeffs)

    with tf.variable_scope('guide'):
      guide = cls._guide(fullres_input, params, is_training)
      tf.add_to_collection('guide', guide)

    with tf.variable_scope('output'):
      output = cls._output(
          fullres_input, guide, bilateral_coeffs)
      tf.add_to_collection('output', output)

    return output
Project: hdrnet_legacy | Author: mgharbi
def inference(cls, lowres_input, fullres_input, params,
                is_training=False):

    with tf.variable_scope('coefficients'):
      bilateral_coeffs = cls._coefficients(lowres_input, params, is_training)
      tf.add_to_collection('bilateral_coefficients', bilateral_coeffs)

    with tf.variable_scope('multiscale'):
      multiscale = cls._multiscale_input(fullres_input)
      for m in multiscale:
        tf.add_to_collection('multiscale', m)

    with tf.variable_scope('guide'):
      guide = cls._guide(multiscale, params, is_training)
      for g in guide:
        tf.add_to_collection('guide', g)

    with tf.variable_scope('output'):
      output = cls._output(multiscale, guide, bilateral_coeffs)
      tf.add_to_collection('output', output)

    return output
Project: kaggle-review | Author: daxiongshu
def _get_weight_variable(self, layer_name, name, shape, L2=1):
        wname = '%s/%s:0'%(layer_name,name)
        fanin, fanout = shape[-2:]
        for dim in shape[:-2]:
            fanin *= float(dim)
            fanout *= float(dim)

        sigma = self._xavi_norm(fanin, fanout)
        if self.weights is None or wname not in self.weights:
            w1 = tf.get_variable(name,initializer=tf.truncated_normal(shape = shape,
                mean=0,stddev = sigma))
            print('{:>23} {:>23}'.format(wname, 'randomly initialize'))
        else:
            w1 = tf.get_variable(name, shape = shape,
                initializer=tf.constant_initializer(value=self.weights[wname],dtype=tf.float32))
            self.loaded_weights[wname]=1
        if wname != w1.name:
            print(wname,w1.name)
            assert False
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.nn.l2_loss(w1)*L2)
        return w1
Project: Tensormodels | Author: asheshjain399
def l1_loss(tensor, weight=1.0, scope=None):
  """Define a L1Loss, useful for regularize, i.e. lasso.

  Args:
    tensor: tensor to regularize.
    weight: scale the loss by this factor.
    scope: Optional scope for name_scope.

  Returns:
    the L1 loss op.
  """
  with tf.name_scope(scope, 'L1Loss', [tensor]):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: Tensormodels | Author: asheshjain399
def l2_loss(tensor, weight=1.0, scope=None, normalize=False):
  """Define a L2Loss, useful for regularize, i.e. weight decay.

  Args:
    tensor: tensor to regularize.
    weight: an optional weight to modulate the loss.
    scope: Optional scope for name_scope.
    normalize: if True, normalize the loss by the tensor size; the weight
        factor is then ignored.

  Returns:
    the L2 loss op.
  """
  with tf.name_scope(scope, 'L2Loss', [tensor]):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    if normalize:
      loss = tf.sqrt(tf.sqrt(tf.nn.l2_loss(tensor)) / tf.to_float(tf.size(tensor)), name='value')
    else:
      loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')

    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: Tensormodels | Author: asheshjain399
def sparse_cross_entropy_loss(logits, labels,
                       weight=1.0, scope=None):
  """Define a Cross Entropy loss using sparse_softmax_cross_entropy_with_logits.

  It can scale the loss by a weight factor.

  Args:
    logits: [batch_size, num_classes] logits outputs of the network .
    labels: [batch_size,] target labels.
    weight: scale the loss by this factor.
    scope: Optional scope for name_scope.

  Returns:
    A tensor with the softmax_cross_entropy loss.
  """
  with tf.name_scope(scope, 'SparseCrossEntropyLoss', [logits, labels]):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='xentropy')
    weight = tf.convert_to_tensor(weight,
                                    dtype=logits.dtype.base_dtype,
                                    name='loss_weight')

    loss = tf.multiply(weight, tf.reduce_mean(cross_entropy), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: Tensormodels | Author: asheshjain399
def add_variable(var, restore=True):
  """Adds a variable to the MODEL_VARIABLES collection.

    Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.
  Args:
    var: a variable.
    restore: whether the variable should be added to the
      VARIABLES_TO_RESTORE collection.

  """
  collections = [MODEL_VARIABLES]
  if restore:
    collections.append(VARIABLES_TO_RESTORE)
  for collection in collections:
    if var not in tf.get_collection(collection):
      tf.add_to_collection(collection, var)
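A collection populated this way is typically consumed when restoring pretrained weights; a hedged sketch (VARIABLES_TO_RESTORE is the module-level constant used above, checkpoint_path is hypothetical):

variables_to_restore = tf.get_collection(VARIABLES_TO_RESTORE)
restorer = tf.train.Saver(variables_to_restore)
# later, inside a session:
# restorer.restore(sess, checkpoint_path)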
Project: attend_infer_repeat | Author: akosiorek
def make_moving_average(name, value, init, decay, log=True):
    """Creates an exp-moving average of `value` and an update op, which is added to UPDATE_OPS collection.

    :param name: string, name of the created moving average tf.Variable
    :param value: tf.Tensor, the value to be averaged
    :param init: float, an initial value for the moving average
    :param decay: float between 0 and 1, exponential decay of the moving average
    :param log: bool, add a summary op if True
    :return: tf.Tensor, the moving average
    """
    var = tf.get_variable(name, shape=value.get_shape(),
                          initializer=tf.constant_initializer(init), trainable=False)

    update = moving_averages.assign_moving_average(var, value, decay, zero_debias=False)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update)
    if log:
        tf.summary.scalar(name, var)

    return var
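Ops added to tf.GraphKeys.UPDATE_OPS do not run by themselves; the usual pattern makes the train op depend on them so the moving averages are refreshed every step (optimizer and loss are assumed defined):

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)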
Project: Face-Pose-Net | Author: fengju514
def _variable_with_weight_decay(self, name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.                                                                                                               
    Note that the Variable is initialized with a truncated normal distribution.                                                                                                                             
    A weight decay is added only if one is specified.                                                                                                                                                       
    Args:                                                                                                                                                                                                   
    name: name of the variable                                                                                                                                                                              
    shape: list of ints                                                                                                                                                                                     
    stddev: standard deviation of a truncated Gaussian                                                                                                                                                      
    wd: add L2Loss weight decay multiplied by this float. If None, weight                                                                                                                                   
        decay is not added for this Variable.                                                                                                                                                               
    Returns:                                                                                                                                                                                                
    Variable Tensor                                                                                                                                                                                         
    """
    dtype = tf.float32 #if FLAGS.use_fp16 else tf.float32                                                                                                                                                    
    var = self._variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
      weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
      tf.add_to_collection('losses', weight_decay)
    return var
Project: 3D_CNN_jonas | Author: 2015ZxEE
def variable_with_weight_decay(name, shape, stddev, wd):
    """
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name   -> name of the variable
        shape  -> list of ints
        stddev -> standard deviation of a truncated Gaussian
        wd     -> add L2Loss weight decay multiplied by this float.
                        If None, weight decay is not added for this Variable.
    Rtns:
        var    -> variable tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var   = variable_on_cpu(name,shape,
                    tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: 3D_CNN_jonas | Author: 2015ZxEE
def loss(logits, label_batch):
    """
    Add L2Loss to all the trainable variables.
    Add summary for "Loss" and "Loss/avg".
    Args:
        logits      -> logits from inference()
        label_batch -> 1D tensor of [batch_size]
    Rtns:
        total_loss  -> float tensor
    """
    # Calculate the average cross entropy loss across the batch.
    label_batch        = tf.cast(label_batch,tf.int64)
    cross_entropy      = tf.nn.sparse_softmax_cross_entropy_with_logits(
                                logits=logits, labels=label_batch, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses',cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: unsupervised-2017-cvprw | Author: imatge-upc
def tower_loss(name_scope, autoencoder, clips):
    # calculate reconstruction loss
    rec_loss = tf.reduce_mean(tf.abs(clips-autoencoder.rec_vid))

    weight_decay_loss_list = tf.get_collection('losses', name_scope)
    weight_decay_loss = 0.0
    if len(weight_decay_loss_list) > 0:
        weight_decay_loss = tf.add_n(weight_decay_loss_list)

    tf.add_to_collection('losses', rec_loss)
    losses = tf.get_collection('losses', name_scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    return total_loss, rec_loss, weight_decay_loss
Project: deep-time-reading | Author: felixduvallet
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: deep-time-reading | Author: felixduvallet
def _loss_shared(logits, labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".
    Args:
      logits: Logits from inference().
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
              of shape [batch_size]

    Returns:
      Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: tf-cnn-lstm-ocr-captcha | Author: Luonic
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: qrn | Author: uwnlp
def variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: baselines | Author: openai
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
    with tf.variable_scope(name, reuse=reuse):
        assert (len(U.scope_name().split('/')) == 2)

        w = tf.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
        b = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init))
        weight_decay_fc = 3e-4

        if weight_loss_dict is not None:
            weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
            weight_loss_dict[w] = weight_decay_fc
            weight_loss_dict[b] = 0.0

            tf.add_to_collection(U.scope_name().split('/')[0] + '_' + 'losses', weight_decay)

        return tf.nn.bias_add(tf.matmul(x, w), b)
Project: dtnn | Author: atomistic-machine-learning
def embedding(indices, n_vocabulary, n_out,
              weight_init=glorot_uniform,
              reference=None,
              trainable=True,
              scope=None, reuse=False, name='Embedding'):
    if type(n_out) is int:
        n_out = (n_out,)
    with tf.variable_scope(scope, default_name=name, reuse=reuse) as scope:
        if reference is None:
            W = tf.get_variable('W', shape=(n_vocabulary,) + n_out,
                                initializer=weight_init,
                                trainable=trainable)
        else:
            W = tf.get_variable('W', shape=(n_vocabulary,) + n_out,
                                initializer=reference_initializer(reference),
                                trainable=trainable)
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, W)

        y = tf.nn.embedding_lookup(W, indices)
    return y
Project: dynamic-training-bench | Author: galeone
def loss(self, predictions, real_values):
        """Return the loss operation between predictions and real_values.
        Add L2 weight decay term if any.
        Args:
            predictions: predicted values
            real_values: real values
        Returns:
            Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # 1/2n \sum^{n}_{i=i}{(x_i - x'_i)^2}
            mse = tf.divide(
                tf.reduce_mean(
                    tf.square(tf.subtract(predictions, real_values))),
                2.,
                name="mse")
            tf.add_to_collection(LOSSES, mse)

            # mse + weight_decay per layer
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Project: dynamic-training-bench | Author: galeone
def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.
        Args:
          logits: Logits from get().
          labels: Labels from train_inputs or inputs(). 1-D tensor
                  of shape [batch_size]

        Returns:
          Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # Calculate the average cross entropy loss across the batch.
            labels = tf.cast(labels, tf.int64)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels, name='cross_entropy_per_example')
            cross_entropy_mean = tf.reduce_mean(
                cross_entropy, name='cross_entropy')
            tf.add_to_collection(LOSSES, cross_entropy_mean)

            # The total loss is defined as the cross entropy loss plus all of the weight
            # decay terms (L2 loss).
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Project: dwt | Author: min2209
def get_conv_filter(self, params):
        if params["name"]+"/weights" in self.modelDict:
            init = tf.constant_initializer(value=self.modelDict[params["name"]+"/weights"], dtype=tf.float32)
            var = tf.get_variable(name="weights", initializer=init, shape=params["shape"])
            print "loaded " + params["name"]+"/weights"
        else:
            if params["std"]:
                stddev = params["std"]
            else:
                fanIn = params["shape"][0]*params["shape"][1]*params["shape"][2]
                stddev = (2/float(fanIn))**0.5

            init = tf.truncated_normal(shape=params["shape"], stddev=stddev, seed=0)
            var = tf.get_variable(name="weights", initializer=init)
            print "generated " + params["name"] + "/weights"

        if not tf.get_variable_scope().reuse:
            weightDecay = tf.multiply(tf.nn.l2_loss(var), self._wd,
                                      name='weight_loss')
            tf.add_to_collection('losses', weightDecay)

        return var
Project: ngraph | Author: NervanaSystems
def get_collection_handle(self, names):
        """
        Get collection handles; collections are saved via `tf.add_to_collection`.

        Arguments:
            names: list of names

        Returns:
            List of ngraph ops corresponding to the names
        """
        if self._graph is None:
            raise ValueError("self.graph is empty, import meta_graph first.")
        with self._graph.as_default():
            tf_nodes = [tf.get_collection(name)[0] for name in names]
            ng_ops = [self.get_op_handle(tf_node) for tf_node in tf_nodes]
            return ng_ops
Project: facial-emotion-detection-dl | Author: dllatas
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: facial-emotion-detection-dl | Author: dllatas
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')