Python tensorflow.python.ops.array_ops module: constant() example source code

We extracted the following 21 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.array_ops.constant().
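
As a quick orientation before the project snippets: array_ops.constant is the same op that is exposed publicly as tf.constant. Below is a minimal usage sketch, assuming TensorFlow 1.x graph mode (the era these snippets were written for); the variable names are illustrative.

# Minimal sketch of array_ops.constant; assumes TensorFlow 1.x graph mode.
# array_ops.constant is the same op that is exposed publicly as tf.constant.
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops

zero = array_ops.constant(0.0, dtype=dtypes.float64)     # scalar constant
vec = array_ops.constant([1, 2, 3], dtype=dtypes.int32)  # 1-D constant
filled = array_ops.constant(7, shape=[2, 3])              # scalar broadcast to shape [2, 3]

with tf.Session() as sess:
  print(sess.run([zero, vec, filled]))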

Project: lsdc    Author: febert    | Project source | File source
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is != 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)
  return control_flow_ops.cond(
      math_ops.equal(
          array_ops.constant(0.0, dtype=dtypes.float64), denominator),
      lambda: array_ops.constant(0.0, dtype=dtypes.float64),
      lambda: math_ops.div(numerator, denominator),
      name=name)
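
A minimal usage sketch for the helper above, assuming TensorFlow 1.x graph mode; num and den are illustrative names, and the framework imports are the same ones the snippet itself relies on.

# Illustrative only: exercising _safe_scalar_div as defined above.
# Assumes TensorFlow 1.x graph mode.
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops

num = array_ops.constant(3.0, dtype=dtypes.float64)
den = array_ops.constant(0.0, dtype=dtypes.float64)
ratio = _safe_scalar_div(num, den, name='safe_ratio')

with tf.Session() as sess:
  print(sess.run(ratio))  # prints 0.0 because the denominator is 0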
Project: lsdc    Author: febert    | Project source | File source
def __init__(
      self, dtypes, shapes=None, capacity=10, shared_name='feeding_queue'):
    self._dtypes = dtypes
    self._shapes = shapes
    self._shared_name = shared_name
    self._capacity = capacity
    self._local_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
                                            dtypes=self._dtypes,
                                            shapes=self._shapes,
                                            name=self._shared_name,
                                            shared_name=self._shared_name)
    self._num_remote_feeds = 0

    # Fake do-nothing operation that's used to prevent remote queues
    # from being closed, and as a workaround for b/32749157
    self._fake_op = array_ops.constant('dummy close', name='feeder_fake_op').op
    self._feeding_event = threading.Event()
Project: lsdc    Author: febert    | Project source | File source
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)
  return control_flow_ops.cond(
      math_ops.equal(
          array_ops.constant(0.0, dtype=dtypes.float64), denominator),
      lambda: array_ops.constant(0.0, dtype=dtypes.float64),
      lambda: math_ops.div(numerator, denominator),
      name=name)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_get_output_alternatives_single_no_default(self):
    prediction_tensor = constant_op.constant(["bogus"])
    provided_output_alternatives = {
        "head-1": (constants.ProblemType.LINEAR_REGRESSION,
                   {"output": prediction_tensor}),
    }
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions=prediction_tensor,
        output_alternatives=provided_output_alternatives)

    output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
        model_fn_ops)

    self.assertEqual({"head-1":
                      (constants.ProblemType.LINEAR_REGRESSION,
                       {"output": prediction_tensor})},
                     output_alternatives)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)
  return control_flow_ops.cond(
      math_ops.equal(
          array_ops.constant(0.0, dtype=dtypes.float64), denominator),
      lambda: array_ops.constant(0.0, dtype=dtypes.float64),
      lambda: math_ops.div(numerator, denominator),
      name=name)
Project: lsdc    Author: febert    | Project source | File source
def _apply_transform(self, input_tensors, **kwargs):
    """Applies the transformation to the `transform_input`.

    Args:
      input_tensors: a list of Tensors representing the input to
        the Transform.
      **kwargs: Additional keyword arguments, unused here.

    Returns:
        A namedtuple of Tensors representing the transformed output.
    """
    d = input_tensors[0]

    if self.strip_value is np.nan:
      strip_hot = math_ops.is_nan(d)
    else:
      strip_hot = math_ops.equal(d,
                                 array_ops.constant([self.strip_value],
                                                    dtype=d.dtype))
    keep_hot = math_ops.logical_not(strip_hot)

    length = array_ops.reshape(array_ops.shape(d), [])
    indices = array_ops.boolean_mask(math_ops.range(length), keep_hot)
    values = array_ops.boolean_mask(d, keep_hot)

    sparse_indices = array_ops.reshape(
        math_ops.cast(indices, dtypes.int64), [-1, 1])
    shape = math_ops.cast(array_ops.shape(d), dtypes.int64)

    # pylint: disable=not-callable
    return self.return_type(ops.SparseTensor(sparse_indices, values, shape))
Project: lsdc    Author: febert    | Project source | File source
def _event_shape(self):
    return array_ops.constant([], dtype=dtypes.int32)
Project: lsdc    Author: febert    | Project source | File source
def __call__(self, input_data, input_h, input_c, params, is_training=True):
    """Run the forward step for the RNN model.

    Args:
      input_data: the input sequence to the RNN model.
      input_h: the initial hidden state for h.
      input_c: the initial hidden state for c. This is only relevant for LSTM.
      params: the parameter buffer created for this model.
      is_training: whether this operation will be used in training or inference.

    Returns:
      output: the output sequence.
      output_h: the final state for h.
      output_c: the final state for c. This is only relevant for LSTM.
    """
    if self._rnn_mode != "lstm":
      # For model that doesn't take input_c, replace with a dummy tensor.
      input_c = array_ops.constant([], dtype=dtypes.float32)
    output, output_h, output_c, _ = gen_cudnn_rnn_ops.cudnn_rnn(
        input=input_data,
        input_h=input_h,
        input_c=input_c,
        params=params,
        rnn_mode=self._rnn_mode,
        input_mode=self._input_mode,
        direction=self._direction,
        dropout=self._dropout,
        seed=self._seed,
        seed2=self._seed2,
        is_training=is_training)
    return (output, output_h, output_c)

  # TODO(zhengxq): add reading and writing canonical weights.
Project: lsdc    Author: febert    | Project source | File source
def _apply_transform(self, input_tensors, **kwargs):
    """Applies the transformation to the `transform_input`.

    Args:
      input_tensors: a list of Tensors representing the input to
        the Transform.
      **kwargs: Additional keyword arguments, unused here.

    Returns:
        A namedtuple of Tensors representing the transformed output.
    """
    d = input_tensors[0]

    if self.strip_value is np.nan:
      strip_hot = math_ops.is_nan(d)
    else:
      strip_hot = math_ops.equal(d,
                                 array_ops.constant([self.strip_value],
                                                    dtype=d.dtype))
    keep_hot = math_ops.logical_not(strip_hot)

    length = array_ops.reshape(array_ops.shape(d), [])
    indices = array_ops.boolean_mask(math_ops.range(length), keep_hot)
    values = array_ops.boolean_mask(d, keep_hot)

    sparse_indices = array_ops.reshape(
        math_ops.cast(indices, dtypes.int64), [-1, 1])
    shape = math_ops.cast(array_ops.shape(d), dtypes.int64)

    # pylint: disable=not-callable
    return self.return_type(
        sparse_tensor.SparseTensor(sparse_indices, values, shape))
Project: lsdc    Author: febert    | Project source | File source
def constant(value, dtype=None, axes=None, name=None):
  """Creates a constant tensor.

  If `axes` includes any strings, shape is inferred from `value`. Otherwise,
  the sizes of the given `axes` are used to set `shape` for `tf.constant`.

  See tf.constant for more details.

  Args:
    value: The input tensor.
    dtype: The type of the returned tensor.
    axes: Optional Axes, list of strings or list of objects coercible to Axis
      objects. By default, axes are assumed to be an empty list (i.e., `value`
      is treated as a scalar).
    name: Optional op name.

  Returns:
    The constant value as a `LabeledTensor`.
  """
  with ops.name_scope(name, 'lt_constant', [value]) as scope:

    if axes is None:
      axes = []

    if isinstance(axes, core.Axes):
      axes = axes.values()

    if any(isinstance(ax, string_types) for ax in axes):
      # need to infer shape
      shape = None
    else:
      # axes already indicate shape
      axes = [core.as_axis(a) for a in axes]
      shape = [a.size for a in axes]

    op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
    return core.LabeledTensor(op, axes)
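
A minimal usage sketch for the labeled-tensor wrapper above. It assumes the tf.contrib.labeled_tensor package (TF 1.x contrib) that defines the core module is available, that ('name', size) pairs are coercible to Axis objects, and that the axis name 'x' is purely illustrative.

# Illustrative only: calling the labeled-tensor constant() wrapper defined above.
# Assumes tf.contrib.labeled_tensor (TF 1.x contrib) and its core module.

# String axes: shape is inferred from the value; the single axis is named 'x'.
lt_vec = constant([1.0, 2.0, 3.0], axes=['x'])

# Sized axes: shape [3] is taken from the ('x', 3) axis and 0.0 is broadcast.
lt_filled = constant(0.0, axes=[('x', 3)])

print(lt_vec.axes)  # Axes with a single axis 'x' of size 3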
Project: lsdc    Author: febert    | Project source | File source
def _event_shape(self):
    return array_ops.constant([], dtype=dtypes.int32)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _apply_transform(self, input_tensors, **kwargs):
    """Applies the transformation to the `transform_input`.

    Args:
      input_tensors: a list of Tensors representing the input to
        the Transform.
      **kwargs: Additional keyword arguments, unused here.

    Returns:
        A namedtuple of Tensors representing the transformed output.
    """
    d = input_tensors[0]

    if self.strip_value is np.nan:
      strip_hot = math_ops.is_nan(d)
    else:
      strip_hot = math_ops.equal(d,
                                 array_ops.constant([self.strip_value],
                                                    dtype=d.dtype))
    keep_hot = math_ops.logical_not(strip_hot)

    length = array_ops.reshape(array_ops.shape(d), [])
    indices = array_ops.boolean_mask(math_ops.range(length), keep_hot)
    values = array_ops.boolean_mask(d, keep_hot)

    sparse_indices = array_ops.reshape(
        math_ops.cast(indices, dtypes.int64), [-1, 1])
    shape = math_ops.cast(array_ops.shape(d), dtypes.int64)

    # pylint: disable=not-callable
    return self.return_type(
        sparse_tensor.SparseTensor(sparse_indices, values, shape))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_get_output_alternatives_none_provided(self):
    prediction_tensor = constant_op.constant(["bogus"])
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": prediction_tensor},
        output_alternatives=None)

    output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
        model_fn_ops)

    self.assertEqual(
        {"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
            "some_output": prediction_tensor})},
        output_alternatives)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_get_output_alternatives_empty_provided_with_default(self):
    prediction_tensor = constant_op.constant(["bogus"])
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": prediction_tensor},
        output_alternatives={})

    with self.assertRaises(ValueError) as e:
      saved_model_export_utils.get_output_alternatives(model_fn_ops, "WRONG")

    self.assertEqual("Requested default_output_alternative: WRONG, but "
                     "available output_alternatives are: []", str(e.exception))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_get_output_alternatives_empty_provided_no_default(self):
    prediction_tensor = constant_op.constant(["bogus"])
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": prediction_tensor},
        output_alternatives={})

    output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
        model_fn_ops)

    self.assertEqual(
        {"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
            "some_output": prediction_tensor})},
        output_alternatives)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_build_all_signature_defs_legacy_input_fn_not_supported(self):
    """Tests that legacy input_fn returning (features, labels) raises error.

    serving_input_fn must return InputFnOps including a default input
    alternative.
    """
    input_features = constant_op.constant(["10"])
    input_ops = ({"features": input_features}, None)
    input_alternatives, _ = (
        saved_model_export_utils.get_input_alternatives(input_ops))
    output_1 = constant_op.constant(["1"])
    output_2 = constant_op.constant(["2"])
    output_3 = constant_op.constant(["3"])
    provided_output_alternatives = {
        "head-1": (constants.ProblemType.LINEAR_REGRESSION, {
            "some_output_1": output_1
        }),
        "head-2": (constants.ProblemType.CLASSIFICATION, {
            "some_output_2": output_2
        }),
        "head-3": (constants.ProblemType.UNSPECIFIED, {
            "some_output_3": output_3
        }),
    }
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": constant_op.constant(["4"])},
        output_alternatives=provided_output_alternatives)
    output_alternatives, _ = (saved_model_export_utils.get_output_alternatives(
        model_fn_ops, "head-1"))

    with self.assertRaisesRegexp(
        ValueError, "A default input_alternative must be provided"):
      saved_model_export_utils.build_all_signature_defs(
          input_alternatives, output_alternatives, "head-1")
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_make_export_strategy(self):
    """Only tests that an ExportStrategy instance is created."""
    def _serving_input_fn():
      return array_ops.constant([1]), None
    export_strategy = saved_model_export_utils.make_export_strategy(
        serving_input_fn=_serving_input_fn,
        default_output_alternative_key="default",
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=5)
    self.assertTrue(
        isinstance(export_strategy, export_strategy_lib.ExportStrategy))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def constant(value, dtype=None, axes=None, name=None):
  """Creates a constant tensor.

  If `axes` includes any strings, shape is inferred from `value`. Otherwise,
  the sizes of the given `axes` are used to set `shape` for `tf.constant`.

  See tf.constant for more details.

  Args:
    value: The input tensor.
    dtype: The type of the returned tensor.
    axes: Optional Axes, list of strings or list of objects coercible to Axis
      objects. By default, axes are assumed to be an empty list (i.e., `value`
      is treated as a scalar).
    name: Optional op name.

  Returns:
    The constant value as a `LabeledTensor`.
  """
  with ops.name_scope(name, 'lt_constant', [value]) as scope:

    if axes is None:
      axes = []

    if isinstance(axes, core.Axes):
      axes = axes.values()

    if any(isinstance(ax, string_types) for ax in axes):
      # need to infer shape
      shape = None
    else:
      # axes already indicate shape
      axes = [core.as_axis(a) for a in axes]
      shape = [a.size for a in axes]

    op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
    return core.LabeledTensor(op, axes)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def __call__(self, input_data, input_h, input_c, params, is_training=True):
    """Runs the forward step for the RNN model.

    Args:
      input_data: the input sequence to the RNN model.
      input_h: the initial hidden state for h.
      input_c: the initial hidden state for c. This is only relevant for LSTM.
      params: the parameter buffer created for this model.
      is_training: whether this operation will be used in training or inference.

    Returns:
      output: the output sequence.
      output_h: the final state for h.
      output_c: the final state for c. This is only relevant for LSTM.
    """
    if self._rnn_mode != "lstm":
      # For model that doesn't take input_c, replace with a dummy tensor.
      input_c = array_ops.constant([], dtype=dtypes.float32)
    output, output_h, output_c, _ = gen_cudnn_rnn_ops.cudnn_rnn(
        input=input_data,
        input_h=input_h,
        input_c=input_c,
        params=params,
        rnn_mode=self._rnn_mode,
        input_mode=self._input_mode,
        direction=self._direction,
        dropout=self._dropout,
        seed=self._seed,
        seed2=self._seed2,
        is_training=is_training)
    return (output, output_h, output_c)
Project: lsdc    Author: febert    | Project source | File source
def _padding(sequences, num_unroll):
  """For a dictionary of sequences, pads tensors to a multiple of `num_unroll`.

  Args:
    sequences: dictionary with `Tensor` values.
    num_unroll: int specifying to what multiple to pad sequences to.
  Returns:
    length: Scalar `Tensor`, the common size of dimension 0 of all the values
      in `sequences`.
    padded_sequence: Dictionary of sequences that are padded to a multiple of
      `num_unroll`.
  Raises:
    ValueError: If `num_unroll` is not an int.
    TypeError: If `sequences` is not a dictionary mapping strings to `Tensor`s.
  """
  if not isinstance(num_unroll, numbers.Integral):
    raise ValueError("Unsupported num_unroll expected int, got: %s" %
                     str(num_unroll))
  if not isinstance(sequences, dict):
    raise TypeError("Unsupported sequences expected dict, got: %s" %
                    str(sequences))
  for key, value in sequences.items():
    if not isinstance(key, six.string_types):
      raise TypeError("Unsupported sequences key expected string, got: %s" %
                      str(key))
  if not sequences:
    return 0, {}

  sequences_dict = {}
  for key, value in sequences.items():
    sequences_dict[key] = ops.convert_to_tensor(value)

  lengths = [array_ops.shape(value)[0] for value in sequences_dict.values()]
  length = lengths[0]
  all_lengths_equal = [
      control_flow_ops.Assert(
          math_ops.equal(l, length), [string_ops.string_join(
              ["All sequence lengths must match, but received lengths: ",
               string_ops.as_string(lengths)])])
      for l in lengths]

  length = control_flow_ops.with_dependencies(all_lengths_equal, length)
  unroll = array_ops.constant(num_unroll)
  padded_length = length + ((unroll - (length % unroll)) % unroll)
  padded_sequences = {}
  for key, value in sequences_dict.items():
    # 1. create shape of paddings
    # first dimension of value will be increased by num_paddings to
    # padded_length
    num_paddings = [padded_length - array_ops.shape(value)[0]]
    # the shape of the paddings that we concat with the original value will be
    # [num_paddings, tf.shape(value)[1], tf.shape(value)[2], ...,
    #  tf.shape(value)[tf.rank(value) - 1])]
    padding_shape = array_ops.concat(0, (
        num_paddings, array_ops.shape(value)[1:]))
    # 2. fill padding shape with dummies
    dummy = array_ops.constant("" if value.dtype == dtypes.string else 0,
                               dtype=value.dtype)
    paddings = array_ops.fill(dims=padding_shape, value=dummy)
    # 3. concat values with paddings
    padded_sequences[key] = array_ops.concat(0, [value, paddings])
  return length, padded_sequences
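
A minimal usage sketch for _padding above, assuming the legacy TensorFlow release this snippet targets (note the old concat(dim, values) argument order it uses); the dictionary keys and values are illustrative.

# Illustrative only: padding a dict of sequences up to a multiple of num_unroll.
# Assumes the legacy TensorFlow release the snippet above was written against
# (it relies on the old array_ops.concat(dim, values) argument order).
import tensorflow as tf
from tensorflow.python.ops import array_ops

sequences = {
    "tokens": array_ops.constant([[1, 2], [3, 4], [5, 6]]),  # length 3 along dim 0
    "mask": array_ops.constant([1.0, 1.0, 1.0]),
}
length, padded = _padding(sequences, num_unroll=4)

with tf.Session() as sess:
  print(sess.run(length))                  # 3, the original length
  print(sess.run(padded["tokens"]).shape)  # (4, 2): padded to the next multiple of 4
  print(sess.run(padded["mask"]).shape)    # (4,)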
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def test_build_all_signature_defs(self):
    input_features = constant_op.constant(["10"])
    input_example = constant_op.constant(["11"])
    input_ops = input_fn_utils.InputFnOps({
        "features": input_features
    }, None, {"default input": input_example})
    input_alternatives, _ = (
        saved_model_export_utils.get_input_alternatives(input_ops))
    output_1 = constant_op.constant(["1"])
    output_2 = constant_op.constant(["2"])
    output_3 = constant_op.constant(["3"])
    provided_output_alternatives = {
        "head-1": (constants.ProblemType.LINEAR_REGRESSION, {
            "some_output_1": output_1
        }),
        "head-2": (constants.ProblemType.CLASSIFICATION, {
            "some_output_2": output_2
        }),
        "head-3": (constants.ProblemType.UNSPECIFIED, {
            "some_output_3": output_3
        }),
    }
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": constant_op.constant(["4"])},
        output_alternatives=provided_output_alternatives)
    output_alternatives, _ = (saved_model_export_utils.get_output_alternatives(
        model_fn_ops, "head-1"))

    signature_defs = saved_model_export_utils.build_all_signature_defs(
        input_alternatives, output_alternatives, "head-1")

    expected_signature_defs = {
        "serving_default":
            signature_def_utils.regression_signature_def(input_example,
                                                         output_1),
        "default_input_alternative:head-1":
            signature_def_utils.regression_signature_def(input_example,
                                                         output_1),
        "default_input_alternative:head-2":
            signature_def_utils.classification_signature_def(input_example,
                                                             output_2, None),
        "default_input_alternative:head-3":
            signature_def_utils.predict_signature_def({
                "input": input_example
            }, {"output": output_3}),
        # "features_input_alternative:head-1":
        #     signature_def_utils.regression_signature_def(input_features,
        #                                                  output_1),
        # "features_input_alternative:head-2":
        #     signature_def_utils.classification_signature_def(input_features,
        #                                                      output_2, None),
        # "features_input_alternative:head-3":
        #     signature_def_utils.predict_signature_def({
        #         "input": input_features
        #     }, {"output": output_3}),
    }

    self.assertDictEqual(expected_signature_defs, signature_defs)