Python tensorflow module: scatter_sub() code examples

The following 15 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.scatter_sub(). All snippets target the TensorFlow 1.x API and assume that TensorFlow has been imported as tf.

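As a minimal orientation before the project snippets: tf.scatter_sub(ref, indices, updates) subtracts each row of updates from the row of the variable ref selected by the matching index, in place. The sketch below is not from any of the listed projects; the variable values and indices are made up for illustration, and it assumes TensorFlow 1.x with graph execution.

import tensorflow as tf

# A 3x2 variable whose selected rows are updated in place.
ref = tf.Variable([[1., 1.], [2., 2.], [3., 3.]])
indices = tf.constant([0, 2])
updates = tf.constant([[0.5, 0.5], [1., 1.]])

# Row-wise in-place subtraction: ref[indices[i], :] -= updates[i, :]
sub_op = tf.scatter_sub(ref, indices, updates)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(sub_op))
    # [[0.5 0.5]
    #  [2.  2. ]
    #  [2.  2. ]]
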
Project: tefla    Author: openAGI
def center_loss(features, label, alpha, num_classes, name='center_loss'):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)

    Args:
        features: 2-D `tensor` [batch_size, feature_length], input features
        label: 1-D `tensor` [batch_size], input labels
        alpha: a `float`, centers update rate; the centers move toward the batch features by a factor of `1 - alpha`
        num_classes: an `int`, number of classes for training
        name: a `str`, name of the variable scope

    Returns:
        a tuple of the scalar center loss and the op that updates the centers
    """
    with tf.variable_scope(name):
        num_features = features.get_shape()[1]
        centers = tf.get_variable('centers', [num_classes, num_features], dtype=tf.float32,
                                  initializer=tf.constant_initializer(0), trainable=False)
        label = tf.reshape(label, [-1])
        centers_batch = tf.gather(centers, label)
        diff = (1 - alpha) * (centers_batch - features)
        centers = tf.scatter_sub(centers, label, diff)
        loss = tf.nn.l2_loss(features - centers_batch)
        return loss, centers
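A usage sketch (not from the tefla repository) for the center_loss above: because tf.scatter_sub only modifies the centers variable when the returned op is actually executed, the second return value is typically attached to the train op through a control dependency. The toy network, layer sizes, and hyperparameters below are hypothetical.

import tensorflow as tf

# Hypothetical toy model; shapes and hyperparameters are illustrative only.
images = tf.placeholder(tf.float32, [None, 784])
labels = tf.placeholder(tf.int64, [None])
features = tf.layers.dense(images, 128, activation=tf.nn.relu)  # embedding layer
logits = tf.layers.dense(features, 10)                          # classifier head

center_term, centers_update_op = center_loss(features, labels,
                                              alpha=0.95, num_classes=10)
softmax_term = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
total_loss = softmax_term + 0.5 * center_term  # 0.5 is an arbitrary center-loss weight

# Run the centers update (the scatter_sub op) whenever a training step runs.
with tf.control_dependencies([centers_update_op]):
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(total_loss)
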
Project: tefla    Author: openAGI
def _clip_sparse(self, grad, var):
        assert isinstance(grad, tf.IndexedSlices)
        clip_dims = self._vars_to_clip_dims[var]
        if 0 in clip_dims:
            log.warn("Clipping norm across dims %s for %s is inefficient "
                     "when including sparse dimension 0.", clip_dims,
                     var.op.name)
            return self._clip_dense(var)

        with tf.colocate_with(var):
            var_subset = tf.gather(var, grad.indices)
        with self._maybe_colocate_with(var):
            normalized_var_subset = tf.clip_by_norm(
                var_subset, self._max_norm, clip_dims)
            delta = tf.IndexedSlices(
                var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
        with tf.colocate_with(var):
            return var.scatter_sub(delta, use_locking=self._use_locking)
Project: tf-re-id    Author: jhb86253817
def center_loss(features, label, label_stats, centers, alfa):
    """The center loss.
       features: [batch_size, 512], the embeddings of the images.
       label: [batch_size, class_num], one-hot class labels (1 at the true class index, 0 elsewhere).
       label_stats: [batch_size, 1], the count of each sample's label within the batch.
       centers: [class_num, 512], center points, one per class.
       alfa: float, update rate of the centers.
    """
    label = tf.argmax(label, 1)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = alfa * (centers_batch - features)
    diff = diff / label_stats
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.nn.l2_loss(features - centers_batch)
    return loss, centers
Project: facerecognition    Author: guoxiaolu
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
        initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
Project: faceNet_RealTime    Author: jack55436001
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
        initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
Project: tf_face    Author: ZhijianChan
def center_loss(features, label, alpha, num_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    dim_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [num_classes, dim_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    center_feats = tf.gather(centers, label)
    diff = (1 - alpha) * tf.subtract(center_feats, features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.nn.l2_loss(features - center_feats)
    return loss, centers
Project: icyface_api    Author: bupticybee
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
        initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
Project: tefla    Author: openAGI
def _apply_sparse(self, grad, var):
        beta1_power = tf.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = tf.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)
        lr = (lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m := beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_t = tf.scatter_update(m, grad.indices,
                                beta1_t * tf.gather(m, grad.indices) +
                                (1 - beta1_t) * grad.values,
                                use_locking=self._use_locking)

        # v := beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_t = tf.scatter_update(v, grad.indices,
                                beta2_t * tf.gather(v, grad.indices) +
                                (1 - beta2_t) *
                                tf.square(grad.values),
                                use_locking=self._use_locking)

        # variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))
        m_t_slice = tf.gather(m_t, grad.indices)
        v_t_slice = tf.gather(v_t, grad.indices)
        denominator_slice = tf.sqrt(v_t_slice) + epsilon_t
        var_update = tf.scatter_sub(var, grad.indices,
                                    lr * m_t_slice / denominator_slice,
                                    use_locking=self._use_locking)
        return tf.group(var_update, m_t, v_t)
Project: Sing_Par    Author: wanghm92
def _finish(self, caches):
    """"""

    if self.clip > 0:
      S_t = [cache['s_t'] for cache in caches]
      S_t, _ = tf.clip_by_global_norm(S_t, self.clip)
      for cache, s_t in zip(caches, S_t):
        cache['s_t'] = s_t

    for cache in caches:
      x_tm1 = cache['x_tm1']
      s_t = cache['s_t']
      updates = cache['updates']
      with tf.name_scope('update_' + x_tm1.op.name), tf.device(x_tm1.device):
        if 'idxs' in cache:
          idxs = cache['idxs']
          x_t = tf.scatter_sub(x_tm1, idxs, s_t)
          if self.chi > 0:
            x_t_ = tf.gather(x_t, idxs)
            x_bar_t, t_x_bar = self._sparse_moving_average(x_tm1, idxs, x_t_, 'x', beta=self.chi)
        else:
          x_t = tf.assign_sub(x_tm1, s_t)
          if self.chi > 0:
            x_bar_t, t_x_bar = self._dense_moving_average(x_tm1, x_t, 'x', beta=self.chi)
      updates.append(x_t)
      if self.chi > 0:
        updates.extend([x_bar_t, t_x_bar])

    update_ops = [tf.group(*cache['updates']) for cache in caches]
    return tf.group(*update_ops, name='update')

  #==============================================================
Project: Parser-v1    Author: tdozat
def _finish(self, caches):
    """"""

    if self.clip > 0:
      S_t = [cache['s_t'] for cache in caches]
      S_t, _ = tf.clip_by_global_norm(S_t, self.clip)
      for cache, s_t in zip(caches, S_t):
        cache['s_t'] = s_t

    for cache in caches:
      x_tm1 = cache['x_tm1']
      s_t = cache['s_t']
      updates = cache['updates']
      with tf.name_scope('update_' + x_tm1.op.name), tf.device(x_tm1.device):
        if 'idxs' in cache:
          idxs = cache['idxs']
          x_t = tf.scatter_sub(x_tm1, idxs, s_t)
          if self.chi > 0:
            x_t_ = tf.gather(x_t, idxs)
            x_bar_t, t_x_bar = self._sparse_moving_average(x_tm1, idxs, x_t_, 'x', beta=self.chi)
        else:
          x_t = tf.assign_sub(x_tm1, s_t)
          if self.chi > 0:
            x_bar_t, t_x_bar = self._dense_moving_average(x_tm1, x_t, 'x', beta=self.chi)
      updates.append(x_t)
      if self.chi > 0:
        updates.extend([x_bar_t, t_x_bar])

    update_ops = [tf.group(*cache['updates']) for cache in caches]
    return tf.group(*update_ops, name='update')

  #==============================================================
Project: facenet    Author: davidsandberg
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
        initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
Project: DP_for_FaceNet    Author: guchinoma
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
        initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.nn.l2_loss(features - centers_batch)
    return loss, centers
Project: Face-Recognition    Author: aswl01
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
Project: UnstableParser    Author: tdozat
def _finish(self, caches):
    """"""

    if self.clip > 0:
      S_t = [cache['s_t'] for cache in caches]
      S_t, _ = tf.clip_by_global_norm(S_t, self.clip)
      for cache, s_t in zip(caches, S_t):
        cache['s_t'] = s_t

    for cache in caches:
      x_tm1 = cache['x_tm1']
      s_t = cache['s_t']
      updates = cache['updates']
      with tf.name_scope('update_' + x_tm1.op.name), tf.device(x_tm1.device):
        if 'idxs' in cache:
          idxs = cache['idxs']
          x_t = tf.scatter_sub(x_tm1, idxs, s_t)
          if self.chi > 0:
            x_t_ = tf.gather(x_t, idxs)
            x_bar_t, t_x_bar = self._sparse_moving_average(x_tm1, idxs, x_t_, 'x', beta=self.chi)
        else:
          x_t = tf.assign_sub(x_tm1, s_t)
          if self.chi > 0:
            x_bar_t, t_x_bar = self._dense_moving_average(x_tm1, x_t, 'x', beta=self.chi)
      updates.append(x_t)
      if self.chi > 0:
        updates.extend([x_bar_t, t_x_bar])

    update_ops = [tf.group(*cache['updates']) for cache in caches]
    return tf.group(*update_ops, name='update')

  #==============================================================
Project: real-time-face-recognition    Author: iwantooxxoox
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
        initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers