Python tensorflow.python.ops.init_ops module: zeros_initializer() usage examples

We extracted the following 34 code examples from open-source Python projects to illustrate how tensorflow.python.ops.init_ops.zeros_initializer() is used.
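
Before the project examples, here is a minimal self-contained sketch of the typical TF 1.x pattern (ours, not taken from any project below): pass zeros_initializer() to get_variable so the variable starts at zero.

import tensorflow as tf
from tensorflow.python.ops import init_ops

# A [10]-element bias variable initialized to all zeros.
with tf.variable_scope('demo'):
  bias = tf.get_variable(
      'bias', shape=[10], dtype=tf.float32,
      initializer=init_ops.zeros_initializer())

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(bias))  # ten 0.0 values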

Project: canshi    Author: hungsing92
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc
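
As a usage note, a short sketch under our assumptions (images is a hypothetical input tensor, not part of the canshi code; arg_scope and layers are the names imported by the snippet): the returned dict is re-entered with arg_scope so every conv2d and fully_connected built inside it picks up the shared L2 regularizer and zero bias initializer.

with arg_scope(vgg_arg_scope(weight_decay=0.0005)):
  # images: e.g. a [batch, 224, 224, 3] float tensor.
  net = layers.conv2d(images, 64, [3, 3], scope='conv1_1')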
Project: lsdc    Author: febert
def create_global_step(graph=None):
  """Create global step tensor in graph.

  Args:
    graph: The graph in which to create the global step. If missing, use default
        graph.

  Returns:
    Global step tensor.

  Raises:
    ValueError: if global step key is already defined.
  """
  graph = ops.get_default_graph() if graph is None else graph
  if get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    collections = [ops.GraphKeys.VARIABLES, ops.GraphKeys.GLOBAL_STEP]
    return variable(ops.GraphKeys.GLOBAL_STEP, shape=[], dtype=dtypes.int64,
                    initializer=init_ops.zeros_initializer, trainable=False,
                    collections=collections)
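
A note on the two spellings that appear across these examples: in early TensorFlow releases init_ops.zeros_initializer was a plain function (callable with a shape to produce a zeros tensor, or passed uncalled as initializer=), while from roughly TF 1.0 onward it is an Initializer class that is instantiated as zeros_initializer(). A small sketch of the newer form (ours):

import tensorflow as tf
from tensorflow.python.ops import init_ops

# TF >= 1.0: zeros_initializer() returns an Initializer object. get_variable
# normally calls it with the variable's shape and dtype, but it can also be
# invoked directly:
init = init_ops.zeros_initializer()
zeros = init(shape=[3], dtype=tf.float32)  # a [3] tensor of zeros

with tf.Session() as sess:
  print(sess.run(zeros))  # [0. 0. 0.]

Passing the uncalled name, as the snippet above does, also worked in the releases these projects targeted, since get_variable accepts an Initializer class and instantiates it itself.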
Project: lsdc    Author: febert
def to_weighted_sum(self,
                      transformed_input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and a list of created Variable."""

    def _weight(name):
      return variable_scope.get_variable(
          name,
          shape=[self.dimension, num_outputs],
          initializer=init_ops.zeros_initializer,
          collections=_add_variable_collection(weight_collections))

    if self.name:
      weight = _weight("weight")
    else:
      # Old behavior to support a subset of old checkpoints.
      weight = _weight("_weight")

    # The _RealValuedColumn has the shape of [batch_size, column.dimension].
    log_odds_by_dim = math_ops.matmul(
        transformed_input_tensor, weight, name="matmul")
    return log_odds_by_dim, [weight]
Project: lsdc    Author: febert
def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    output, embedding_weights = _create_embedding_lookup(
        input_tensor=input_tensor,
        weight_tensor=None,
        vocab_size=self.length,
        dimension=num_outputs,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=init_ops.zeros_initializer,
        combiner=self.combiner,
        trainable=trainable)
    if self.ckpt_to_load_from is not None:
      weights_to_restore = embedding_weights
      if len(embedding_weights) == 1:
        weights_to_restore = embedding_weights[0]
      checkpoint_utils.init_from_checkpoint(
          self.ckpt_to_load_from,
          {self.tensor_name_in_ckpt: weights_to_restore})
    return output, embedding_weights
Project: lsdc    Author: febert
def create_global_step(graph=None):
  """Create global step tensor in graph.

  Args:
    graph: The graph in which to create the global step. If missing, use default
        graph.

  Returns:
    Global step tensor.

  Raises:
    ValueError: if global step key is already defined.
  """
  graph = ops.get_default_graph() if graph is None else graph
  if get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    collections = [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]
    return variable(ops.GraphKeys.GLOBAL_STEP, shape=[], dtype=dtypes.int64,
                    initializer=init_ops.zeros_initializer, trainable=False,
                    collections=collections)
Project: lsdc    Author: febert
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name, shape=value.get_shape(), dtype=value.dtype,
          initializer=init_ops.zeros_initializer, trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor*std)
    return max_norms, mean
Project: BDD_Driving_Model    Author: gy20073
def motion_tower(stage_status, image_features):
    with tf.variable_scope(stage_status):
        with tf.variable_scope('Ptrain'):
            if FLAGS.ss_bottleneck_arch:
                assert not FLAGS.early_split
                image_features = slim.conv2d(image_features, FLAGS.city_num_classes, [1, 1], 1,
                                             normalizer_fn=None,
                                             activation_fn=None,
                                             biases_initializer=init_ops.zeros_initializer,
                                             padding='VALID',
                                             scope='segmentation_fc8',
                                             reuse=True if FLAGS.city_data else False)

                # reduce the dimensionality from the conv5 feature map
                with tf.variable_scope('motion_tower'):
                    # size 224/8 = 28
                    image_features = slim.conv2d(image_features, 64, [5, 5], 3, scope='motion1')
                    # size 9
                    image_features = slim.conv2d(image_features, 128, [5, 5], 3, scope='motion2')
                    # size 3
    return image_features
Project: relax    Author: duvenaud
def _create_baseline(self, n_output=1, n_hidden=100,
                       is_zero_init=False,
                       collection='BASELINE'):
    # center input
    h = self._x
    if self.mean_xs is not None:
      h -= self.mean_xs

    if is_zero_init:
      initializer = init_ops.zeros_initializer()
    else:
      initializer = slim.variance_scaling_initializer()

    with slim.arg_scope([slim.fully_connected],
                        variables_collections=[collection, Q_COLLECTION],
                        trainable=False,
                        weights_initializer=initializer):
      h = slim.fully_connected(h, n_hidden, activation_fn=tf.nn.tanh)
      baseline = slim.fully_connected(h, n_output, activation_fn=None)

      if n_output == 1:
        baseline = tf.reshape(baseline, [-1])  # flatten [batch, 1] -> [batch] so downstream ops broadcast per example
    return baseline
Project: lsdc    Author: febert
def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    return _create_embedding_lookup(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        dimension=num_outputs,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=init_ops.zeros_initializer,
        combiner=self.combiner,
        trainable=trainable)
Project: lsdc    Author: febert
def _to_embedding_lookup_arguments(self, input_tensor):
    return _EmbeddingLookupArguments(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer,
        combiner=self.combiner)
Project: lsdc    Author: febert
def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    return _create_embedding_lookup(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        dimension=num_outputs,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=init_ops.zeros_initializer,
        combiner=self.sparse_id_column.combiner,
        trainable=trainable)
Project: lsdc    Author: febert
def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):
    """Returns a Tensor as linear predictions and a list of created Variable."""
    return _create_embedding_lookup(
        input_tensor=self.to_sparse_tensor(input_tensor),
        weight_tensor=None,
        vocab_size=self.length * self.source_column.dimension,
        dimension=num_outputs,
        weight_collections=_add_variable_collection(weight_collections),
        initializer=init_ops.zeros_initializer,
        combiner="sum",
        trainable=trainable)
Project: lsdc    Author: febert
def _to_embedding_lookup_arguments(self, input_tensor):
    return _EmbeddingLookupArguments(
        input_tensor=self.to_sparse_tensor(input_tensor),
        weight_tensor=None,
        vocab_size=self.length * self.source_column.dimension,
        initializer=init_ops.zeros_initializer,
        combiner="sum")
Project: lsdc    Author: febert
def _to_embedding_lookup_arguments(self, input_tensor):
    return _EmbeddingLookupArguments(
        input_tensor=input_tensor,
        weight_tensor=None,
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer,
        combiner=self.combiner)
Project: lsdc    Author: febert
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables."""
  with variable_scope.variable_scope(
      None, 'hist_accumulate', [hist_true, hist_false]):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_true.dtype),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_false.dtype),
        collections=collections,
        trainable=False)

    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')

    return hist_true_acc, hist_false_acc, update_op
Project: lsdc    Author: febert
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer,
        combiner=self.combiner)
Project: lsdc    Author: febert
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer,
        combiner=self.sparse_id_column.combiner)
Project: lsdc    Author: febert
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=input_tensor,
        weight_tensor=None,
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer,
        combiner=self.combiner)
Project: lsdc    Author: febert
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables."""
  with variable_scope.variable_scope(
      None, 'hist_accumulate', [hist_true, hist_false]):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_true.dtype),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        initializer=init_ops.zeros_initializer(
            [nbins],
            dtype=hist_false.dtype),
        collections=collections,
        trainable=False)

    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')

    return hist_true_acc, hist_false_acc, update_op
Project: document-qa    Author: allenai
def test_forget_bias(self):
        """
        Make sure the forget bias is only being applied to the forget gate
        """
        batches = 1
        num_units = 5
        num_inputs = 5

        hidden_size = (batches, num_units)
        input_size = (batches, num_inputs)

        inputs = tf.placeholder(dtype='float32', shape=input_size)
        h = tf.placeholder(dtype='float32', shape=hidden_size)
        with tf.variable_scope("test_bias"):
            i_t, j_t, f_t, o_t = _compute_gates(inputs, h, 4 * num_units, 1,
                                                init_ops.zeros_initializer(), init_ops.zeros_initializer())
        gates = [i_t, j_t, f_t, o_t]

        sess = tf.Session()
        sess.run(tf.global_variables_initializer())

        # Make sure the bias is ONLY getting applied to the forget gate
        [i,j,f,o] = sess.run(gates, feed_dict={inputs: np.zeros(input_size), h: np.ones(hidden_size)})
        self.assertTrue(np.allclose(f, np.ones(f.shape), rtol=0))
        for x in [i,j,o]:
            self.assertTrue(np.allclose(x, np.zeros(x.shape), rtol=0))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def create_global_step(graph=None):
  """Create global step tensor in graph.

  Args:
    graph: The graph in which to create the global step. If missing, use default
        graph.

  Returns:
    Global step tensor.

  Raises:
    ValueError: if global step key is already defined.
  """
  graph = ops.get_default_graph() if graph is None else graph
  if get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    collections = [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]
    return variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=collections)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _logistic_regression_model_fn(features, labels, mode):
  _ = mode
  logits = layers.linear(
      features,
      1,
      weights_initializer=init_ops.zeros_initializer(),
      # Intentionally uses really awful initial values so that
      # AUC/precision/recall/etc will change meaningfully even on a toy dataset.
      biases_initializer=init_ops.constant_initializer(-10.0))
  predictions = math_ops.sigmoid(logits)
  loss = loss_ops.sigmoid_cross_entropy(logits, labels)
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return predictions, loss, train_op
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer(),
        combiner=self.combiner)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer(),
        combiner=self.sparse_id_column.combiner)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=input_tensor,
        weight_tensor=None,
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer(),
        combiner=self.combiner)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name,
          shape=value.get_shape(),
          dtype=value.dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables."""
  with variable_scope.variable_scope(
      None, 'hist_accumulate', [hist_true, hist_false]):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        shape=[nbins],
        dtype=hist_true.dtype,
        initializer=init_ops.zeros_initializer(),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        shape=[nbins],
        dtype=hist_true.dtype,
        initializer=init_ops.zeros_initializer(),
        collections=collections,
        trainable=False)

    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')

    return hist_true_acc, hist_false_acc, update_op
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testLSTMCell(self):
    # Run with all-0 weights, no padding.
    m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 0., 0.)
    self.assertAllClose(m, [[0.]] * self._batch_size)
    self.assertAllClose(c, [[0.]] * self._batch_size)
    m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 1., 0.)
    self.assertAllClose(m, [[.25]] * self._batch_size)
    self.assertAllClose(c, [[.5]] * self._batch_size)
    m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 0., 0.)
    self.assertAllClose(m, [[.0]] * self._batch_size)
    self.assertAllClose(c, [[.0]] * self._batch_size)
    m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 1., 0.)
    self.assertAllClose(m, [[.25]] * self._batch_size)
    self.assertAllClose(c, [[.5]] * self._batch_size)

    # Run with all-1 weights, no padding.
    for m_prev in [0., 1.]:
      for c_prev in [0., 1.]:
        m, c = self._RunLSTMCell('ones',
                                 init_ops.ones_initializer(), m_prev, c_prev,
                                 0.)
        self.assertAllClose(m, self._NextM(self._inputs, 1., m_prev, c_prev))
        self.assertAllClose(c, self._NextC(self._inputs, 1., m_prev, c_prev))

    # Run with random weights.
    for weight in np.random.rand(3):
      weight_tf = constant_op.constant(weight, dtypes.float32)
      random_weight = lambda shape, w=weight_tf: array_ops.fill(shape, w)

      # No padding.
      for m_prev in [0., 1.]:
        for c_prev in [0., 1.]:
          m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 0.)
          self.assertAllClose(m,
                              self._NextM(self._inputs, weight, m_prev, c_prev))
          self.assertAllClose(c,
                              self._NextC(self._inputs, weight, m_prev, c_prev))

      # Set padding.
      for m_prev in [0., 1.]:
        for c_prev in [0., 1.]:
          m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 1.)
          self.assertAllClose(m, [[m_prev]] * self._batch_size)
          self.assertAllClose(c, [[c_prev]] * self._batch_size)
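
A quick worked check of the all-zero-weight expectations above (assuming, as the asserted values suggest, sigmoid gates with c = f*c_prev + i*tanh(pre) and m = o*c, i.e. no output tanh):

import math

sigmoid = lambda x: 1. / (1. + math.exp(-x))
# All-zero weights make every gate pre-activation 0, so each gate is 0.5.
f = i = o = sigmoid(0.)         # 0.5
c = f * 1. + i * math.tanh(0.)  # 0.5 * 1 + 0.5 * 0 = 0.5  (c_prev = 1)
m = o * c                       # 0.5 * 0.5 = 0.25, matching the test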
Project: lsdc    Author: febert
def bias_add(inputs,
             activation_fn=None,
             initializer=init_ops.zeros_initializer,
             regularizer=None,
             reuse=None,
             variables_collections=None,
             outputs_collections=None,
             trainable=True,
             scope=None):
  """Adds a bias to the inputs.

  Can be used as a normalizer function for conv2d and fully_connected.

  Args:
    inputs: a tensor with at least rank 2 and a statically known last
      dimension, e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
    activation_fn: activation function, default set to None to skip it and
      maintain a linear activation.
    initializer: An initializer for the bias, defaults to 0.
    regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional collections for the variables.
    outputs_collections: collections to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_scope.

  Returns:
    a tensor representing the result of adding biases to the inputs.
  """
  with variable_scope.variable_scope(scope, 'BiasAdd', [inputs],
                                     reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dtype = inputs.dtype.base_dtype
    num_features = utils.last_dimension(inputs.get_shape(), min_rank=2)
    biases_collections = utils.get_variable_collections(variables_collections,
                                                        'biases')
    biases = variables.model_variable('biases',
                                      shape=[num_features,],
                                      dtype=dtype,
                                      initializer=initializer,
                                      regularizer=regularizer,
                                      collections=biases_collections,
                                      trainable=trainable)
    outputs = nn.bias_add(inputs, biases)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)
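
A hedged usage sketch (ours, assuming TF 1.x with tf.contrib.layers): because bias_add acts as a normalizer function, it can be passed to conv2d via normalizer_fn, which makes the convolution itself bias-free and delegates the bias (plus optional activation) to this layer.

import tensorflow as tf
from tensorflow.contrib import layers

# Hypothetical input batch.
images = tf.placeholder(tf.float32, [None, 32, 32, 3])
net = layers.conv2d(images, 16, [3, 3],
                    normalizer_fn=layers.bias_add,
                    scope='conv1')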
Project: lsdc    Author: febert
def _create_joint_embedding_lookup(columns_to_tensors,
                                   embedding_lookup_arguments,
                                   num_outputs,
                                   trainable,
                                   weight_collections):
  """Creates an embedding lookup for all columns sharing a single weight."""
  for arg in embedding_lookup_arguments:
    assert arg.weight_tensor is None, (
        'Joint sums for weighted sparse columns are not supported. '
        'Please use weighted_sum_from_feature_columns instead.')
    assert arg.combiner == 'sum', (
        'Combiners other than sum are not supported for joint sums. '
        'Please use weighted_sum_from_feature_columns instead.')
  assert len(embedding_lookup_arguments) >= 1, (
      'At least one column must be in the model.')
  prev_size = 0
  sparse_tensors = []
  for a in embedding_lookup_arguments:
    t = a.input_tensor
    values = t.values + prev_size
    prev_size += a.vocab_size
    sparse_tensors.append(
        ops.SparseTensor(t.indices,
                         values,
                         t.shape))
  sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
  with variable_scope.variable_scope(
      None, default_name='linear_weights', values=columns_to_tensors.values()):
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[prev_size, num_outputs],
        dtype=dtypes.float32,
        initializer=init_ops.zeros_initializer,
        trainable=trainable,
        collections=weight_collections)
    if isinstance(variable, variables.Variable):
      variable = [variable]
    else:
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        sparse_tensor,
        sparse_weights=None,
        default_id=0,
        combiner='sum',
        name='_weights')
    return variable, predictions
Project: lsdc    Author: febert
def _create_joint_embedding_lookup(columns_to_tensors,
                                   embedding_lookup_arguments,
                                   num_outputs,
                                   trainable,
                                   weight_collections):
  """Creates an embedding lookup for all columns sharing a single weight."""
  for arg in embedding_lookup_arguments:
    assert arg.weight_tensor is None, (
        'Joint sums for weighted sparse columns are not supported. '
        'Please use weighted_sum_from_feature_columns instead.')
    assert arg.combiner == 'sum', (
        'Combiners other than sum are not supported for joint sums. '
        'Please use weighted_sum_from_feature_columns instead.')
  assert len(embedding_lookup_arguments) >= 1, (
      'At least one column must be in the model.')
  prev_size = 0
  sparse_tensors = []
  for a in embedding_lookup_arguments:
    t = a.input_tensor
    values = t.values + prev_size
    prev_size += a.vocab_size
    sparse_tensors.append(
        sparse_tensor_py.SparseTensor(t.indices,
                                      values,
                                      t.shape))
  sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
  with variable_scope.variable_scope(
      None, default_name='linear_weights', values=columns_to_tensors.values()):
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[prev_size, num_outputs],
        dtype=dtypes.float32,
        initializer=init_ops.zeros_initializer,
        trainable=trainable,
        collections=weight_collections)
    if isinstance(variable, variables.Variable):
      variable = [variable]
    else:
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        sparse_tensor,
        sparse_weights=None,
        combiner='sum',
        name='_weights')
    return variable, predictions
Project: self-supervision    Author: gustavla
def weighted_moving_average(value,
                            decay,
                            weight,
                            truediv=True,
                            collections=None,
                            name=None):
  """Compute the weighted moving average of `value`.

  Conceptually, the weighted moving average is:
    `moving_average(value * weight) / moving_average(weight)`,
  where a moving average updates by the rule
    `new_value = decay * old_value + (1 - decay) * update`.
  Internally, this Op keeps moving average variables of both `value * weight`
  and `weight`.

  Args:
    value: A numeric `Tensor`.
    decay: A float `Tensor` or float value.  The moving average decay.
    weight:  `Tensor` that keeps the current value of a weight.
      Shape should be able to multiply `value`.
    truediv:  Boolean, if `True`, dividing by `moving_average(weight)` is
      floating point division.  If `False`, use division implied by dtypes.
    collections:  List of graph collections keys to add the internal variables
      `value * weight` and `weight` to.  Defaults to `[GraphKeys.VARIABLES]`.
    name: Optional name of the returned operation.
      Defaults to "WeightedMovingAvg".

  Returns:
    An Operation that updates and returns the weighted moving average.
  """
  # Unlike assign_moving_average, the weighted moving average doesn't modify
  # user-visible variables. It is the ratio of two internal variables, which are
  # moving averages of the updates.  Thus, the signature of this function is
  # quite different than assign_moving_average.
  if collections is None:
    collections = [ops.GraphKeys.VARIABLES]
  with variable_scope.variable_op_scope(
      [value, weight, decay], name, "WeightedMovingAvg") as scope:
    value_x_weight_var = variable_scope.get_variable(
        "value_x_weight",
        initializer=init_ops.zeros_initializer(value.get_shape(),
                                               dtype=value.dtype),
        trainable=False,
        collections=collections)
    weight_var = variable_scope.get_variable(
        "weight",
        initializer=init_ops.zeros_initializer(weight.get_shape(),
                                               dtype=weight.dtype),
        trainable=False,
        collections=collections)
    numerator = assign_moving_average(value_x_weight_var, value * weight, decay)
    denominator = assign_moving_average(weight_var, weight, decay)

    if truediv:
      return math_ops.truediv(numerator, denominator, name=scope.name)
    else:
      return math_ops.div(numerator, denominator, name=scope.name)
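
A brief usage sketch (ours; assumes the function above and an older TF release where variable_op_scope and GraphKeys.VARIABLES still exist): track a running mean of per-batch losses, weighting each update by its batch size.

import tensorflow as tf

# Hypothetical inputs: a scalar per-batch loss and its batch size.
loss = tf.placeholder(tf.float32, [])
batch_size = tf.placeholder(tf.float32, [])
# Each update to the average is weighted by how many examples produced it.
avg_loss = weighted_moving_average(loss, decay=0.99, weight=batch_size)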
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testInitFromCheckpointMissing(self):
    checkpoint_dir = self.get_temp_dir()
    with self.test_session() as session:
      _, _, _, _ = _create_checkpoints(session, checkpoint_dir)

    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.test_session(graph=g) as session:
        with variable_scope.variable_scope("some_scope"):
          _ = variable_scope.get_variable("my1", [10, 10])
          _ = variable_scope.get_variable(
              "my2", [1, 10],
              dtype=dtypes.int64,
              initializer=init_ops.zeros_initializer())

        # No directory.
        with self.assertRaises(errors_impl.OpError):
          checkpoint_utils.init_from_checkpoint("no_dir",
                                                {"var1": "some_scope/my1"})

        # No variable in checkpoint.
        with self.assertRaises(ValueError):
          checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                                {"no_var": "some_scope/my1"})

        # No variable in the graph.
        with self.assertRaises(ValueError):
          checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                                {"var3": "some_scope/no_var"})

        # Shape mismatch.
        with self.assertRaises(ValueError):
          checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                                {"var1": "some_scope/my1"})

        # Variable 'my1' and 'my2' are missing in given checkpoint scope.
        with self.assertRaises(ValueError):
          checkpoint_utils.init_from_checkpoint(
              checkpoint_dir, {"useful_scope/": "some_scope/"})

        # Mapping is not to scope name.
        with self.assertRaises(ValueError):
          checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                                {"useful_scope": "some_scope/"})
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _create_joint_embedding_lookup(columns_to_tensors,
                                   embedding_lookup_arguments,
                                   num_outputs,
                                   trainable,
                                   weight_collections):
  """Creates an embedding lookup for all columns sharing a single weight."""
  for arg in embedding_lookup_arguments:
    assert arg.weight_tensor is None, (
        'Joint sums for weighted sparse columns are not supported. '
        'Please use weighted_sum_from_feature_columns instead.')
    assert arg.combiner == 'sum', (
        'Combiners other than sum are not supported for joint sums. '
        'Please use weighted_sum_from_feature_columns instead.')
  assert len(embedding_lookup_arguments) >= 1, (
      'At least one column must be in the model.')
  prev_size = 0
  sparse_tensors = []
  for a in embedding_lookup_arguments:
    t = a.input_tensor
    values = t.values + prev_size
    prev_size += a.vocab_size
    sparse_tensors.append(
        sparse_tensor_py.SparseTensor(t.indices,
                                      values,
                                      t.dense_shape))
  sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
  with variable_scope.variable_scope(
      None, default_name='linear_weights', values=columns_to_tensors.values()):
    variable = contrib_variables.model_variable(
        name='weights',
        shape=[prev_size, num_outputs],
        dtype=dtypes.float32,
        initializer=init_ops.zeros_initializer(),
        trainable=trainable,
        collections=weight_collections)
    if isinstance(variable, variables.Variable):
      variable = [variable]
    else:
      variable = variable._get_variable_list()  # pylint: disable=protected-access
    predictions = embedding_ops.safe_embedding_lookup_sparse(
        variable,
        sparse_tensor,
        sparse_weights=None,
        combiner='sum',
        name='_weights')
    return variable, predictions