Python tensorflow.python.ops.init_ops module: truncated_normal_initializer() code examples

We extracted the following 29 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.init_ops.truncated_normal_initializer().
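
Before the project examples, here is a minimal, self-contained sketch of the initializer itself (TF 1.x graph mode; the variable name "w" and the shapes are illustrative):

import tensorflow as tf
from tensorflow.python.ops import init_ops

with tf.Graph().as_default():
  # Values are drawn from a normal(mean, stddev) distribution; draws that
  # fall more than two standard deviations from the mean are rejected and
  # re-drawn, which keeps the initial weights bounded.
  initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=0.1)
  w = tf.get_variable("w", shape=[3, 4], initializer=initializer)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w))  # every entry lies within 2 * stddev of the mean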

Project: lsdc | Author: febert
def __new__(cls,
              column_name,
              size,
              dimension,
              combiner="sqrtn",
              initializer=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "column_name: {}".format(column_name))
    if initializer is None:
      stddev = 0.1
      # TODO(b/25671353): Better initial value?
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=stddev)
    return super(_HashedEmbeddingColumn, cls).__new__(cls, column_name, size,
                                                      dimension, combiner,
                                                      initializer)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _create_partition_checkpoints(sess, checkpoint_dir):
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  with variable_scope.variable_scope("scope"):
    v1 = variable_scope.get_variable(
        name="var1",
        shape=[100, 100],
        initializer=init_ops.truncated_normal_initializer(0.5),
        partitioner=partitioned_variables.min_max_variable_partitioner(
            max_partitions=5, axis=0, min_slice_size=8 << 10))
  sess.run(variables.global_variables_initializer())
  v1_value = sess.run(v1._get_variable_list())
  saver = saver_lib.Saver()
  saver.save(
      sess,
      checkpoint_prefix,
      global_step=0,
      latest_filename=checkpoint_state_name)
  return v1_value
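
Note that truncated_normal_initializer(0.5) above passes 0.5 positionally, so it sets the mean rather than the stddev. A minimal, runnable sketch of the same partitioned-variable setup (TF 1.x; the variable name "demo_var" is illustrative):

import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope

with tf.Graph().as_default():
  v = variable_scope.get_variable(
      name="demo_var",
      shape=[100, 100],
      initializer=init_ops.truncated_normal_initializer(mean=0.5),
      partitioner=partitioned_variables.min_max_variable_partitioner(
          max_partitions=5, axis=0, min_slice_size=8 << 10))
  # The partitioner shards along axis 0, capping the shard count at
  # max_partitions while keeping each slice at least min_slice_size bytes.
  print(len(v._get_variable_list()))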
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def testNoScopes(self):
    init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
    init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))

    with self.test_session() as sess:
      initializer = init_ops.truncated_normal_initializer(stddev=.1)
      var0 = variables_lib2.variable(
          'my_var0', shape=[1, 3, 1], initializer=initializer)
      var1 = variables_lib2.variable(
          'my_var1', shape=[2, 1, 2], initializer=initializer)

      var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}
      assign_op, feed_dict = variables_lib2.assign_from_values(
          var_names_to_values)

      # Initialize the variables.
      sess.run(variables_lib.global_variables_initializer())

      # Perform the assignment.
      sess.run(assign_op, feed_dict)

      # Request and test the variable values:
      var0, var1 = sess.run([var0, var1])
      self.assertAllEqual(init_value0, var0)
      self.assertAllEqual(init_value1, var1)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def testNoScopes(self):
    init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
    init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))

    with self.test_session() as sess:
      initializer = init_ops.truncated_normal_initializer(stddev=.1)
      var0 = variables_lib2.variable(
          'my_var0', shape=[1, 3, 1], initializer=initializer)
      var1 = variables_lib2.variable(
          'my_var1', shape=[2, 1, 2], initializer=initializer)

      var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}
      init_fn = variables_lib2.assign_from_values_fn(var_names_to_values)

      # Initialize the variables.
      sess.run(variables_lib.global_variables_initializer())

      # Perform the assignment.
      init_fn(sess)

      # Request and test the variable values:
      var0, var1 = sess.run([var0, var1])
      self.assertAllEqual(init_value0, var0)
      self.assertAllEqual(init_value1, var1)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def testGradientWithZeroWeight(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)

      inputs = array_ops.ones((2, 3))
      weights = variable_scope.get_variable(
          'weights',
          shape=[3, 4],
          initializer=init_ops.truncated_normal_initializer())
      predictions = math_ops.matmul(inputs, weights)

      optimizer = momentum_lib.MomentumOptimizer(
          learning_rate=0.001, momentum=0.9)
      loss = loss_ops.mean_pairwise_squared_error(predictions, predictions, 0)

      gradients_to_variables = optimizer.compute_gradients(loss)

      init_op = variables.global_variables_initializer()

      with self.test_session() as sess:
        sess.run(init_op)
        for grad, _ in gradients_to_variables:
          np_grad = sess.run(grad)
          self.assertFalse(np.isnan(np_grad).any())
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def __new__(cls,
              column_name,
              size,
              dimension,
              hash_key,
              combiner="sqrtn",
              initializer=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "column_name: {}".format(column_name))
    if initializer is None:
      logging.warn("The default stddev value of initializer will change from "
                   "\"0.1\" to \"1/sqrt(dimension)\" after 2017/02/25.")
      stddev = 0.1
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=stddev)
    return super(_ScatteredEmbeddingColumn, cls).__new__(cls, column_name, size,
                                                         dimension, hash_key,
                                                         combiner,
                                                         initializer)
Project: lsdc | Author: febert
def __new__(cls,
              sparse_id_column,
              dimension,
              combiner="sqrtn",
              initializer=None,
              ckpt_to_load_from=None,
              tensor_name_in_ckpt=None,
              shared_embedding_name=None,
              shared_vocab_size=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "Embedding of column_name: {}".format(
                           sparse_id_column.name))

    if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
      raise ValueError("Must specify both `ckpt_to_load_from` and "
                       "`tensor_name_in_ckpt` or none of them.")
    if initializer is None:
      stddev = 1 / math.sqrt(sparse_id_column.length)
      # TODO(b/25671353): Better initial value?
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=stddev)
    return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
                                                dimension, combiner,
                                                initializer, ckpt_to_load_from,
                                                tensor_name_in_ckpt,
                                                shared_embedding_name,
                                                shared_vocab_size)
Project: lsdc | Author: febert
def embedding_column(sparse_id_column,
                     dimension,
                     combiner=None,
                     initializer=None,
                     ckpt_to_load_from=None,
                     tensor_name_in_ckpt=None):
  """Creates an `_EmbeddingColumn`.

  Args:
    sparse_id_column: A `_SparseColumn` which is created by, for example,
      `sparse_column_with_*` or crossed_column functions. Note that `combiner`
      defined in `sparse_id_column` is ignored.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of these can be considered an example-level normalization on the column:
        * "sum": do not normalize
        * "mean": do l1 normalization
        * "sqrtn": do l2 normalization
      For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0.0 and standard deviation
      1/sqrt(sparse_id_column.length).
    ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
      to restore the column weights. Required if `tensor_name_in_ckpt` is not
      None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.

  Returns:
    An `_EmbeddingColumn`.
  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  return _EmbeddingColumn(sparse_id_column, dimension, combiner, initializer,
                          ckpt_to_load_from, tensor_name_in_ckpt)
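
A hedged usage sketch of the public wrapper in TF 1.x contrib (the feature name "wire" and the sizes are illustrative):

import tensorflow as tf

# A sparse string column hashed into 1000 buckets.
sparse_col = tf.contrib.layers.sparse_column_with_hash_bucket(
    "wire", hash_bucket_size=1000)
# With initializer=None, _EmbeddingColumn falls back to a
# truncated_normal_initializer with stddev 1/sqrt(vocabulary length),
# as the __new__ above shows.
embedded = tf.contrib.layers.embedding_column(sparse_col, dimension=16)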
Project: lsdc | Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: lsdc | Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='hard_tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='hard_tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: lsdc | Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='stochastic_hard_tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='stochastic_hard_tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: lsdc | Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='stochastic_soft_tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='stochastic_soft_tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: lsdc | Author: febert
def __new__(cls,
              sparse_id_column,
              dimension,
              combiner="sqrtn",
              initializer=None,
              ckpt_to_load_from=None,
              tensor_name_in_ckpt=None,
              shared_embedding_name=None,
              shared_vocab_size=None,
              max_norm=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "Embedding of column_name: {}".format(
                           sparse_id_column.name))

    if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
      raise ValueError("Must specify both `ckpt_to_load_from` and "
                       "`tensor_name_in_ckpt` or none of them.")
    if initializer is None:
      stddev = 1 / math.sqrt(sparse_id_column.length)
      # TODO(b/25671353): Better initial value?
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=stddev)
    return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
                                                dimension, combiner,
                                                initializer, ckpt_to_load_from,
                                                tensor_name_in_ckpt,
                                                shared_embedding_name,
                                                shared_vocab_size,
                                                max_norm)
Project: lsdc | Author: febert
def embedding_column(sparse_id_column,
                     dimension,
                     combiner=None,
                     initializer=None,
                     ckpt_to_load_from=None,
                     tensor_name_in_ckpt=None):
  """Creates an `_EmbeddingColumn` for feeding sparse data into a DNN.

  Args:
    sparse_id_column: A `_SparseColumn` which is created by, for example,
      `sparse_column_with_*` or crossed_column functions. Note that `combiner`
      defined in `sparse_id_column` is ignored.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of these can be considered an example-level normalization on the column:
        * "sum": do not normalize
        * "mean": do l1 normalization
        * "sqrtn": do l2 normalization
      For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0.0 and standard deviation
      1/sqrt(sparse_id_column.length).
    ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
      to restore the column weights. Required if `tensor_name_in_ckpt` is not
      None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.

  Returns:
    An `_EmbeddingColumn`.
  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  return _EmbeddingColumn(sparse_id_column, dimension, combiner, initializer,
                          ckpt_to_load_from, tensor_name_in_ckpt)
Project: lsdc | Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: lsdc | Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features_per_node],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: lsdc | Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='hard_tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='hard_tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: lsdc | Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='stochastic_hard_tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='stochastic_hard_tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: u8m_test | Author: hxkk
def call(self, inputs, state, scope=None):
        with vs.variable_scope(scope or type(self).__name__):  # "GruRcnCell"
            with vs.variable_scope("Gates"):  # Reset gate and update gate.
                # We start with bias of 1.0.
                w_zrw = self._conv(inputs, self._num_outputs*3, self._ih_filter_h_length, self._ih_filter_w_length,
                                 self._ih_strides, self._ih_pandding, init_ops.truncated_normal_initializer(stddev=0.01), scope="WzrwConv")

                u_zr = self._conv(state, self._num_outputs*2, self._hh_filter_h_length, self._hh_filter_w_length, [1, 1, 1, 1],
                                 "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="UzrConv")

                w_z, w_r, w = tf.split(value=w_zrw, num_or_size_splits=3, axis=3, name="w_split")
                u_z, u_r = tf.split(value=u_zr, num_or_size_splits=2, axis=3, name="u_split")

                z_bias = tf.get_variable(
                    name="z_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer()
                )
                z_gate = math_ops.sigmoid(tf.nn.bias_add(w_z + u_z, z_bias))

                r_bias = tf.get_variable(
                    name="r_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer())
                r_gate = math_ops.sigmoid(tf.nn.bias_add(w_r + u_r, r_bias))

            with vs.variable_scope("Candidate"):
#                 w = self._conv(inputs, self._num_outputs, self._ih_filter_h_length, self._ih_filter_w_length,
#                                self._ih_strides, self._ih_pandding, init_ops.truncated_normal_initializer(stddev=0.01), scope="WConv")
                u = self._conv(r_gate * state, self._num_outputs, self._hh_filter_h_length, self._hh_filter_w_length,
                               [1, 1, 1, 1], "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="UConv")
                c_bias = tf.get_variable(
                    name="c_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer())
                c = math_ops.tanh(tf.nn.bias_add(w + u, c_bias))
            new_h = z_gate * state + (1 - z_gate) * c
        return new_h, new_h
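
The returned state is the standard GRU convex blend of the previous state and the tanh candidate, gated by z_gate. A toy NumPy sketch of just that blend (the values are illustrative):

import numpy as np

z = np.array([0.9, 0.1])         # update gate, in (0, 1) after the sigmoid
state = np.array([1.0, 1.0])     # previous hidden state
c = np.array([0.0, 0.0])         # tanh candidate
new_h = z * state + (1 - z) * c  # keeps state where z is close to 1
print(new_h)                     # [0.9 0.1]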
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def testWithScopes(self):
    init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
    init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))

    with self.test_session() as sess:
      initializer = init_ops.truncated_normal_initializer(stddev=.1)

      with variable_scope.variable_scope('my_model/my_layer0'):
        var0 = variables_lib2.variable(
            'my_var0', shape=[1, 3, 1], initializer=initializer)
      with variable_scope.variable_scope('my_model/my_layer1'):
        var1 = variables_lib2.variable(
            'my_var1', shape=[2, 1, 2], initializer=initializer)

      var_names_to_values = {
          'my_model/my_layer0/my_var0': init_value0,
          'my_model/my_layer1/my_var1': init_value1
      }
      assign_op, feed_dict = variables_lib2.assign_from_values(
          var_names_to_values)

      # Initialize the variables.
      sess.run(variables_lib.global_variables_initializer())

      # Perform the assignment.
      sess.run(assign_op, feed_dict)

      # Request and test the variable values:
      var0, var1 = sess.run([var0, var1])
      self.assertAllEqual(init_value0, var0)
      self.assertAllEqual(init_value1, var1)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def __new__(cls,
              sparse_id_column,
              dimension,
              combiner="mean",
              initializer=None,
              ckpt_to_load_from=None,
              tensor_name_in_ckpt=None,
              shared_embedding_name=None,
              shared_vocab_size=None,
              max_norm=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "Embedding of column_name: {}".format(
                           sparse_id_column.name))

    if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
      raise ValueError("Must specify both `ckpt_to_load_from` and "
                       "`tensor_name_in_ckpt` or none of them.")
    if initializer is None:
      logging.warn("The default stddev value of initializer will change from "
                   "\"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" after "
                   "2017/02/25.")
      stddev = 1 / math.sqrt(sparse_id_column.length)
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=stddev)
    return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
                                                dimension, combiner,
                                                initializer, ckpt_to_load_from,
                                                tensor_name_in_ckpt,
                                                shared_embedding_name,
                                                shared_vocab_size,
                                                max_norm)
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def testEmbeddingColumnWithMultipleInitializersFails(self):
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    features = {"wire": wire_tensor}
    embedded_sparse = feature_column.embedding_column(
        hashed_sparse,
        10,
        initializer=init_ops.truncated_normal_initializer(
            mean=42, stddev=1337))
    embedded_sparse_alternate = feature_column.embedding_column(
        hashed_sparse,
        10,
        initializer=init_ops.truncated_normal_initializer(
            mean=1337, stddev=42))

    # Makes sure that trying to use different initializers with the same
    # embedding column explicitly fails.
    with self.test_session():
      with self.assertRaisesRegexp(
          ValueError,
          "Duplicate feature column key found for column: wire_embedding"):
        feature_column_ops.input_from_feature_columns(
            features, [embedded_sparse, embedded_sparse_alternate])
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _random_weights(self, size=50, num_shards=1):
    assert size > 0
    assert num_shards > 0
    assert num_shards <= size

    embedding_weights = partitioned_variables.create_partitioned_variables(
        shape=[size],
        slicing=[num_shards],
        initializer=init_ops.truncated_normal_initializer(
            mean=0.0, stddev=1.0, dtype=dtypes.float32))
    for w in embedding_weights:
      w.initializer.run()
    return embedding_weights
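
A self-contained sketch of the same sharding call (TF 1.x; the sizes are illustrative):

import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables

with tf.Graph().as_default():
  shards = partitioned_variables.create_partitioned_variables(
      shape=[50],
      slicing=[5],  # split the single axis into five shards of ten
      initializer=init_ops.truncated_normal_initializer(mean=0.0, stddev=1.0))
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print([s.get_shape().as_list() for s in shards])  # five [10] slices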
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def _random_weights(self, size=50, num_shards=1):
    assert size > 0
    assert num_shards > 0
    assert num_shards <= size

    embedding_weights = partitioned_variables.create_partitioned_variables(
        shape=[size],
        slicing=[num_shards],
        initializer=init_ops.truncated_normal_initializer(
            mean=0.0, stddev=1.0, dtype=dtypes.float32))
    for w in embedding_weights:
      w.initializer.run()
    return embedding_weights
Project: lsdc | Author: febert
def hashed_embedding_column(column_name,
                            size,
                            dimension,
                            combiner=None,
                            initializer=None):
  """Creates an embedding column of a sparse feature using parameter hashing.

  The i-th embedding component of a value v is found by retrieving an
  embedding weight whose index is a fingerprint of the pair (v,i).

  Args:
    column_name: A string defining sparse column name.
    size: An integer specifying the number of parameters in the embedding layer.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of these can be thought of as example-level normalizations on the column:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0 and standard deviation 0.1.

  Returns:
    A _HashedEmbeddingColumn.

  Raises:
    ValueError: if dimension or size is not a positive integer; or if combiner
      is not supported.

  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  if (dimension < 1) or (size < 1):
    raise ValueError("Dimension and size must be greater than 0. "
                     "dimension: {}, size: {}, column_name: {}".format(
                         dimension, size, column_name))

  if combiner not in ("mean", "sqrtn", "sum"):
    raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'. "
                     "combiner: {}, column_name: {}".format(combiner,
                                                            column_name))

  return _HashedEmbeddingColumn(column_name, size, dimension, combiner,
                                initializer)
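
A hedged usage sketch of the contrib wrapper (TF 1.x; the column name and sizes are illustrative; later releases renamed this API scattered_embedding_column):

import tensorflow as tf

# 100 shared parameters back a 16-dimensional embedding; component i of a
# value v is looked up by fingerprinting the pair (v, i).
hashed = tf.contrib.layers.hashed_embedding_column(
    column_name="query_tokens", size=100, dimension=16)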
Project: lsdc | Author: febert
def hashed_embedding_column(column_name,
                            size,
                            dimension,
                            combiner=None,
                            initializer=None):
  """Creates an embedding column of a sparse feature using parameter hashing.

  The i-th embedding component of a value v is found by retrieving an
  embedding weight whose index is a fingerprint of the pair (v,i).

  Args:
    column_name: A string defining sparse column name.
    size: An integer specifying the number of parameters in the embedding layer.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of these can be thought of as example-level normalizations on the column:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0 and standard deviation 0.1.

  Returns:
    A _HashedEmbeddingColumn.

  Raises:
    ValueError: if dimension or size is not a positive integer; or if combiner
      is not supported.

  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  if (dimension < 1) or (size < 1):
    raise ValueError("Dimension and size must be greater than 0. "
                     "dimension: {}, size: {}, column_name: {}".format(
                         dimension, size, column_name))

  if combiner not in ("mean", "sqrtn", "sum"):
    raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'. "
                     "combiner: {}, column_name: {}".format(combiner,
                                                            column_name))

  return _HashedEmbeddingColumn(column_name, size, dimension, combiner,
                                initializer)
Project: u8m_test | Author: hxkk
def __call__(self, inputs, state, scope=None):
        isp = inputs.get_shape().as_list()
        M, H, W, C = self.input_size  # M: number of merged inputs
        assert isp[-1] == M * H * W * C
        mergedInputs = tf.reshape(inputs, shape=(-1, M, H, W, C))
        inputs, prevState = tf.unstack(mergedInputs, axis=1, name="unstack")

        with vs.variable_scope(scope or type(self).__name__):  # "GruRcnCell"
            with vs.variable_scope("Gates"):  # Reset gate and update gate.
                # We start with bias of 1.0.
                w_zrw = self._conv(inputs, self._num_outputs*3, self._ih_filter_h_length, self._ih_filter_w_length,
                                 self._ih_strides, self._ih_pandding, init_ops.truncated_normal_initializer(stddev=0.01), scope="WzrwConv")

                u_zr = self._conv(state, self._num_outputs*2, self._hh_filter_h_length, self._hh_filter_w_length, [1, 1, 1, 1],
                                 "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="UzrConv")

                prevU_zr = self._conv(prevState, self._num_outputs*2, self._hh_filter_h_length, self._hh_filter_w_length, [1, 1, 1, 1],
                                 "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="PrevUzrConv")

                w_z, w_r, w = tf.split(value=w_zrw, num_or_size_splits=3, axis=3, name="w_split")
                u_z, u_r = tf.split(value=u_zr, num_or_size_splits=2, axis=3, name="u_split")
                prevU_z, prevU_r = tf.split(value=prevU_zr, num_or_size_splits=2, axis=3, name="prevU_split")

                z_bias = tf.get_variable(
                    name="z_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer()
                )
                z_gate = math_ops.sigmoid(tf.nn.bias_add(w_z + u_z + prevU_z, z_bias))

                r_bias = tf.get_variable(
                    name="r_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer())
                r_gate = math_ops.sigmoid(tf.nn.bias_add(w_r + u_r + prevU_r, r_bias))

            with vs.variable_scope("Candidate"):
#                 w = self._conv(inputs, self._num_outputs, self._ih_filter_h_length, self._ih_filter_w_length,
#                                self._ih_strides, self._ih_pandding, init_ops.truncated_normal_initializer(stddev=0.01), scope="WConv")
                u = self._conv(r_gate * state, self._num_outputs, self._hh_filter_h_length, self._hh_filter_w_length,
                               [1, 1, 1, 1], "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="UConv")
                c_bias = tf.get_variable(
                    name="c_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer())
                c = math_ops.tanh(tf.nn.bias_add(w + u, c_bias))
            new_h = z_gate * state + (1 - z_gate) * c
        return new_h, new_h
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def testInitFromPartitionVar(self):
    checkpoint_dir = self.get_temp_dir()
    with self.test_session() as session:
      v1 = _create_partition_checkpoints(session, checkpoint_dir)

    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.test_session(graph=g) as session:
        with variable_scope.variable_scope("some_scope"):
          my1 = variable_scope.get_variable(
              name="my1",
              shape=[100, 100],
              initializer=init_ops.truncated_normal_initializer(0.5),
              partitioner=partitioned_variables.min_max_variable_partitioner(
                  max_partitions=5, axis=0, min_slice_size=8 << 10))
          my1_var_list = my1._get_variable_list()
        with variable_scope.variable_scope("some_other_scope"):
          my2 = variable_scope.get_variable(
              name="var1",
              shape=[100, 100],
              initializer=init_ops.truncated_normal_initializer(0.5),
              partitioner=partitioned_variables.min_max_variable_partitioner(
                  max_partitions=5, axis=0, min_slice_size=8 << 10))
          my2_var_list = my2._get_variable_list()

        checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
            "scope/var1": "some_scope/my1",
            "scope/": "some_other_scope/"})

        session.run(variables.global_variables_initializer())
        my1_values = session.run(my1_var_list)
        self.assertAllEqual(my1_values, v1)
        my2_values = session.run(my2_var_list)
        self.assertAllEqual(my2_values, v1)

    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.test_session(graph=g) as session:
        with variable_scope.variable_scope("some_scope"):
          my1 = variable_scope.get_variable(
              name="my1",
              shape=[100, 100],
              initializer=init_ops.truncated_normal_initializer(0.5),
              partitioner=partitioned_variables.min_max_variable_partitioner(
                  max_partitions=5, axis=0, min_slice_size=8 << 10))
          my1_var_list = my1._get_variable_list()

        checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                              {"scope/var1": my1_var_list,})

        session.run(variables.global_variables_initializer())
        my1_values = session.run(my1_var_list)
        self.assertAllEqual(my1_values, v1)
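
For reference, the same assignment-map idea is exposed publicly as tf.train.init_from_checkpoint in TF 1.x (a hedged sketch; the checkpoint path and variable names are illustrative):

import tensorflow as tf

# Overrides the initializers of matching variables in the current graph with
# values read from the checkpoint; unmatched variables keep their original
# initializers.
tf.train.init_from_checkpoint("/tmp/model_ckpt",
                              {"scope/var1": "some_scope/my1"})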
Project: DeepLearning_VirtualReality_BigData_Project | Author: rashmitripathi
def embedding_column(sparse_id_column,
                     dimension,
                     combiner="mean",
                     initializer=None,
                     ckpt_to_load_from=None,
                     tensor_name_in_ckpt=None,
                     max_norm=None):
  """Creates an `_EmbeddingColumn` for feeding sparse data into a DNN.

  Args:
    sparse_id_column: A `_SparseColumn` which is created by, for example,
      `sparse_column_with_*` or crossed_column functions. Note that `combiner`
      defined in `sparse_id_column` is ignored.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
      "mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column:
        * "sum": do not normalize
        * "mean": do l1 normalization
        * "sqrtn": do l2 normalization
      For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0.0 and standard deviation
      1/sqrt(sparse_id_column.length).
    ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
      to restore the column weights. Required if `tensor_name_in_ckpt` is not
      None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.
    max_norm: (Optional). If not None, embedding values are l2-normalized to
      the value of max_norm.

  Returns:
    An `_EmbeddingColumn`.
  """
  return _EmbeddingColumn(sparse_id_column, dimension, combiner, initializer,
                          ckpt_to_load_from, tensor_name_in_ckpt,
                          max_norm=max_norm)