Python tensorflow.python.ops.array_ops module: placeholder() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.array_ops.placeholder().
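
Before the project-specific snippets, here is a minimal standalone sketch (not taken from any of the projects below, assuming a TF 1.x graph-mode session) of the pattern they all share: create a placeholder with array_ops.placeholder, build ops on top of it, and supply the concrete value through feed_dict at run time.

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops, math_ops

# float32 placeholder whose first (batch) dimension is left unknown
x = array_ops.placeholder(dtypes.float32, shape=[None, 3], name='x')
y = math_ops.reduce_sum(x, axis=1)  # any op built on top of the placeholder

with tf.Session() as sess:
  # the concrete value is supplied only at run time via feed_dict
  print(sess.run(y, feed_dict={x: np.ones((2, 3), np.float32)}))  # -> [3. 3.]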

Project: complex_tf    Author: woodshop
def testHStack(self):
    with self.test_session(force_gpu=True):
      p1 = array_ops.placeholder(dtypes.complex64, shape=[4, 4])
      p2 = array_ops.placeholder(dtypes.complex64, shape=[4, 4])
      c = array_ops.concat([p1, p2], 0)
      params = {
          p1: (np.random.rand(4, 4) + 
               1j*np.random.rand(4, 4)).astype(np.complex64),
          p2: (np.random.rand(4, 4) + 
               1j*np.random.rand(4, 4)).astype(np.complex64),
      }
      result = c.eval(feed_dict=params)

    self.assertEqual(result.shape, c.get_shape())
    self.assertAllEqual(result[:4, :], params[p1])
    self.assertAllEqual(result[4:, :], params[p2])
Project: complex_tf    Author: woodshop
def testVStack(self):
    with self.test_session(force_gpu=True):
      p1 = array_ops.placeholder(dtypes.complex64, shape=[4, 4])
      p2 = array_ops.placeholder(dtypes.complex64, shape=[4, 4])
      c = array_ops.concat([p1, p2], 1)
      params = {
          p1: (np.random.rand(4, 4) + 
               1j*np.random.rand(4, 4)).astype(np.complex64),
          p2: (np.random.rand(4, 4) + 
               1j*np.random.rand(4, 4)).astype(np.complex64),
      }
      result = c.eval(feed_dict=params)

    self.assertEqual(result.shape, c.get_shape())
    self.assertAllEqual(result[:, :4], params[p1])
    self.assertAllEqual(result[:, 4:], params[p2])
Project: complex_tf    Author: woodshop
def testGradientWithUnknownInputDim(self):
    with self.test_session(use_gpu=True):
      x = array_ops.placeholder(dtypes.complex64)
      y = array_ops.placeholder(dtypes.complex64)
      c = array_ops.concat([x, y], 2)

      output_shape = [10, 2, 9]
      grad_inp = (np.random.rand(*output_shape) + 
                  1j*np.random.rand(*output_shape)).astype(np.complex64)
      grad_tensor = constant_op.constant(
          [inp for inp in grad_inp.flatten()], shape=output_shape)

      grad = gradients_impl.gradients([c], [x, y], [grad_tensor])
      concated_grad = array_ops.concat(grad, 2)
      params = {
          x: (np.random.rand(10, 2, 3) + 
              1j*np.random.rand(10, 2, 3)).astype(np.complex64),
          y: (np.random.rand(10, 2, 6) + 
              1j*np.random.rand(10, 2, 6)).astype(np.complex64),
      }
      result = concated_grad.eval(feed_dict=params)

      self.assertAllEqual(result, grad_inp)
Project: complex_tf    Author: woodshop
def testShapeWithUnknownConcatDim(self):
    p1 = array_ops.placeholder(dtypes.complex64)
    c1 = constant_op.constant(np.complex64(10.0+0j), shape=[4, 4, 4, 4])
    p2 = array_ops.placeholder(dtypes.complex64)
    c2 = constant_op.constant(np.complex64(20.0+0j), shape=[4, 4, 4, 4])
    dim = array_ops.placeholder(dtypes.int32)
    concat = array_ops.concat([p1, c1, p2, c2], dim)
    self.assertEqual(4, concat.get_shape().ndims)

    # All dimensions unknown.
    concat2 = array_ops.concat([p1, p2], dim)
    self.assertEqual(None, concat2.get_shape())

    # Rank doesn't match.
    c3 = constant_op.constant(np.complex64(30.0+0j), shape=[4, 4, 4])
    with self.assertRaises(ValueError):
      array_ops.concat([p1, c1, p2, c3], dim)
Project: LIE    Author: EmbraceLife
def is_sparse(tensor):
  """Returns whether a tensor is a sparse tensor.

  Arguments:
      tensor: A tensor instance.

  Returns:
      A boolean.

  Example:
  ```python
      >>> from keras import backend as K
      >>> a = K.placeholder((2, 2), sparse=False)
      >>> print(K.is_sparse(a))
      False
      >>> b = K.placeholder((2, 2), sparse=True)
      >>> print(K.is_sparse(b))
      True
  ```
  """
  return isinstance(tensor, sparse_tensor.SparseTensor)

Project: LIE    Author: EmbraceLife
def to_dense(tensor):
  """Converts a sparse tensor into a dense tensor and returns it.

  Arguments:
      tensor: A tensor instance (potentially sparse).

  Returns:
      A dense tensor.

  Examples:
  ```python
      >>> from keras import backend as K
      >>> b = K.placeholder((2, 2), sparse=True)
      >>> print(K.is_sparse(b))
      True
      >>> c = K.to_dense(b)
      >>> print(K.is_sparse(c))
      False
  ```
  """
  if is_sparse(tensor):
    return sparse_ops.sparse_tensor_to_dense(tensor)
  else:
    return tensor

Project: LIE    Author: EmbraceLife
def ndim(x):
  """Returns the number of axes in a tensor, as an integer.

  Arguments:
      x: Tensor or variable.

  Returns:
      Integer (scalar), number of axes.

  Examples:
  ```python
      >>> from keras import backend as K
      >>> input = K.placeholder(shape=(2, 4, 5))
      >>> val = np.array([[1, 2], [3, 4]])
      >>> kvar = K.variable(value=val)
      >>> K.ndim(input)
      3
      >>> K.ndim(kvar)
      2
  ```
  """
  dims = x.get_shape()._dims
  if dims is not None:
    return len(dims)
  return None

Project: LIE    Author: EmbraceLife
def batch_set_value(tuples):
  """Sets the values of many tensor variables at once.

  Arguments:
      tuples: a list of tuples `(tensor, value)`.
          `value` should be a Numpy array.
  """
  if tuples:
    assign_ops = []
    feed_dict = {}
    for x, value in tuples:
      value = np.asarray(value)
      tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])
      if hasattr(x, '_assign_placeholder'):
        assign_placeholder = x._assign_placeholder
        assign_op = x._assign_op
      else:
        assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
        assign_op = x.assign(assign_placeholder)
        x._assign_placeholder = assign_placeholder
        x._assign_op = assign_op
      assign_ops.append(assign_op)
      feed_dict[assign_placeholder] = value
    get_session().run(assign_ops, feed_dict=feed_dict)
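
A hypothetical usage sketch of the helper above (the variables w and b are my own illustration, not from the project; it also relies on the surrounding Keras backend helpers such as get_session and _convert_string_dtype being available in the same module). Because each variable caches its assign placeholder and assign op, repeated calls reuse the same graph nodes:

import numpy as np
from tensorflow.python.ops import variables

w = variables.Variable(np.zeros((2, 2), np.float32))
b = variables.Variable(np.zeros((2,), np.float32))
# one Session.run executes both assignments through a shared feed_dict
batch_set_value([(w, np.ones((2, 2), np.float32)),
                 (b, np.full((2,), 0.5, np.float32))])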
Project: LIE    Author: EmbraceLife
def function(inputs, outputs, updates=None, **kwargs):
  """Instantiates a Keras function.

  Arguments:
      inputs: List of placeholder tensors.
      outputs: List of output tensors.
      updates: List of update ops.
      **kwargs: Passed to `tf.Session.run`.

  Returns:
      Output values as Numpy arrays.

  Raises:
      ValueError: if invalid kwargs are passed in.
  """
  if kwargs:
    for key in kwargs:
      if (key not in tf_inspect.getargspec(session_module.Session.run)[0] and
          key not in tf_inspect.getargspec(Function.__init__)[0]):
        msg = ('Invalid argument "%s" passed to K.function with Tensorflow '
               'backend') % key
        raise ValueError(msg)
  return Function(inputs, outputs, updates=updates, **kwargs)
Project: LIE    Author: EmbraceLife
def get_config(self):
  config = {
      'batch_input_shape': self.batch_input_shape,
      'dtype': self.dtype,
      'sparse': self.sparse,
      'name': self.name
  }
  return config

# 1. Layer of tf, build a graph and other attributes
# 2. Layer of tf.keras, build more attributes
# 3. InputLayer of tf.keras, build input tensor as placeholder, and
# 4. save InputLayer as content of history stored in input_tensor._keras_history
# 5. create an input node and store input_tensor as input-output-tensors, store InputLayer as inbound_layers, outbound_layers
# 6. then save this Node inside outbound_layers.inbound_nodes or inbound_layers.outbound_nodes
# 7. input_tensor can be accessed through InputLayer->Node->tensors
# 8. return input_tensor
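
The numbered notes above describe how Keras' InputLayer wires a placeholder into the graph. A short illustrative sketch (my own, assuming tf.keras in a TF 1.x graph-mode session; _keras_history is a private attribute and its exact layout may differ across versions) shows the observable result:

import tensorflow as tf

x = tf.keras.layers.Input(shape=(4,), name='demo_input')  # creates an InputLayer internally
print(x.op.type)             # 'Placeholder' in graph mode
layer = x._keras_history[0]  # the InputLayer recorded as the tensor's history
print(type(layer).__name__)  # e.g. 'InputLayer'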
Project: lsdc    Author: febert
def make_placeholder_from_tensor(t, scope=None):
  """Create a tf.placeholder for the Graph Editor.

  Note that the correct graph scope must be set by the calling function.

  Args:
    t: a tf.Tensor whose name will be used to create the placeholder
      (see function placeholder_name).
    scope: absolute scope within which to create the placeholder. None
      means that the scope of t is preserved. "" means the root scope.
  Returns:
    A newly created tf.placeholder.
  Raises:
    TypeError: if t is not None or a tf.Tensor.
  """
  return tf_array_ops.placeholder(dtype=t.dtype, shape=t.get_shape(),
                                  name=placeholder_name(t, scope=scope))
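
A hypothetical usage sketch, assuming TF 1.x where this helper lives in tf.contrib.graph_editor: the new placeholder copies the dtype and (possibly partial) shape of the given tensor, and its name is derived via placeholder_name.

import tensorflow as tf
from tensorflow.contrib import graph_editor as ge

t = tf.constant([[1.0, 2.0]], name='t')
ph = ge.make_placeholder_from_tensor(t)
print(ph.dtype, ph.get_shape())  # float32, (1, 2)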
Project: lsdc    Author: febert
def input_builder(self):
    """Builds inputs in the graph.

    Returns:
      Two placeholders for inputs and outputs.
    """
    input_shape = [None] + self.input_shape[1:]
    self._input_placeholder = array_ops.placeholder(
        dtypes.as_dtype(self._input_dtype),
        input_shape,
        name='input')
    if self.output_shape is None:
      self._output_placeholder = None
    else:
      output_shape = [None] + self.output_shape[1:]
      self._output_placeholder = array_ops.placeholder(
          dtypes.as_dtype(self._output_dtype),
          output_shape,
          name='output')
    return self._input_placeholder, self._output_placeholder
Project: lsdc    Author: febert
def create_placeholders_from_signatures(signatures):
  """Creates placeholders from given signatures.

  Args:
    signatures: Dict of `TensorSignature` objects or single `TensorSignature`,
      or `None`.

  Returns:
    Dict of `tf.placeholder` objects or single `tf.placeholder`, or `None`.
  """
  if signatures is None:
    return None
  if not isinstance(signatures, dict):
    return signatures.get_placeholder()
  return {
      key: signatures[key].get_placeholder()
      for key in signatures}
Project: lsdc    Author: febert
def make_place_holder_tensors_for_base_features(feature_columns):
  """Returns placeholder tensors for inference.

  Args:
    feature_columns: An iterable containing all the feature columns. All items
      should be instances of classes derived from _FeatureColumn.
  Returns:
    A dict mapping feature keys to SparseTensors (sparse columns) or
    placeholder Tensors (dense columns).
  """
  # Get dict mapping features to FixedLenFeature or VarLenFeature values.
  dict_for_parse_example = create_feature_spec_for_parsing(feature_columns)
  placeholders = {}
  for column_name, column_type in dict_for_parse_example.items():
    if isinstance(column_type, parsing_ops.VarLenFeature):
      # Sparse placeholder for sparse tensors.
      placeholders[column_name] = array_ops.sparse_placeholder(
          column_type.dtype, name="Placeholder_{}".format(column_name))
    else:
      # Simple placeholder for dense tensors.
      placeholders[column_name] = array_ops.placeholder(
          column_type.dtype,
          shape=(None, column_type.shape[0]),
          name="Placeholder_{}".format(column_name))
  return placeholders
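
A hypothetical usage sketch, assuming TF 1.x tf.contrib.layers feature columns (the column names 'age' and 'country' are illustrative): dense columns become regular placeholders of shape (None, dimension), sparse columns become sparse placeholders, each named 'Placeholder_<column_name>' as in the code above.

from tensorflow.contrib import layers

feature_columns = [
    layers.real_valued_column('age', dimension=1),
    layers.sparse_column_with_hash_bucket('country', hash_bucket_size=100),
]
placeholders = make_place_holder_tensors_for_base_features(feature_columns)
# placeholders['age']     -> dense placeholder, shape (None, 1), name 'Placeholder_age'
# placeholders['country'] -> sparse placeholder for the VarLen 'country' feature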
Project: lsdc    Author: febert
def make_placeholder_from_tensor(t, scope=None):
  """Create a `tf.placeholder` for the Graph Editor.

  Note that the correct graph scope must be set by the calling function.

  Args:
    t: a `tf.Tensor` whose name will be used to create the placeholder
      (see function placeholder_name).
    scope: absolute scope within which to create the placeholder. None
      means that the scope of `t` is preserved. `""` means the root scope.
  Returns:
    A newly created `tf.placeholder`.
  Raises:
    TypeError: if `t` is not `None` or a `tf.Tensor`.
  """
  return tf_array_ops.placeholder(
      dtype=t.dtype, shape=t.get_shape(), name=placeholder_name(
          t, scope=scope))
Project: lsdc    Author: febert
def input_builder(self):
    """Builds inputs in the graph.

    Returns:
      Two placeholders for inputs and outputs.
    """
    input_shape = [None] + self.input_shape[1:]
    self._input_placeholder = array_ops.placeholder(
        dtypes.as_dtype(self._input_dtype),
        input_shape,
        name='input')
    if self.output_shape is None:
      self._output_placeholder = None
    else:
      output_shape = [None] + self.output_shape[1:]
      self._output_placeholder = array_ops.placeholder(
          dtypes.as_dtype(self._output_dtype),
          output_shape,
          name='output')
    return self._input_placeholder, self._output_placeholder
Project: lsdc    Author: febert
def create_placeholders_from_signatures(signatures):
  """Creates placeholders from given signatures.

  Args:
    signatures: Dict of `TensorSignature` objects or single `TensorSignature`,
      or `None`.

  Returns:
    Dict of `tf.placeholder` objects or single `tf.placeholder`, or `None`.
  """
  if signatures is None:
    return None
  if not isinstance(signatures, dict):
    return signatures.get_placeholder()
  return {
      key: signatures[key].get_placeholder()
      for key in signatures}
Project: lsdc    Author: febert
def make_place_holder_tensors_for_base_features(feature_columns):
  """Returns placeholder tensors for inference.

  Args:
    feature_columns: An iterable containing all the feature columns. All items
      should be instances of classes derived from _FeatureColumn.
  Returns:
    A dict mapping feature keys to SparseTensors (sparse columns) or
    placeholder Tensors (dense columns).
  """
  # Get dict mapping features to FixedLenFeature or VarLenFeature values.
  dict_for_parse_example = create_feature_spec_for_parsing(feature_columns)
  placeholders = {}
  for column_name, column_type in dict_for_parse_example.items():
    if isinstance(column_type, parsing_ops.VarLenFeature):
      # Sparse placeholder for sparse tensors.
      placeholders[column_name] = array_ops.sparse_placeholder(
          column_type.dtype, name="Placeholder_{}".format(column_name))
    else:
      # Simple placeholder for dense tensors.
      placeholders[column_name] = array_ops.placeholder(
          column_type.dtype,
          shape=(None, column_type.shape[0]),
          name="Placeholder_{}".format(column_name))
  return placeholders
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def test_with_shape_none(self):
    with self.test_session():
      tensor_no_shape = array_ops.placeholder(dtypes.float32)

      compatible_shape = [2, 2]
      with_present_2x2 = tensor_util.with_shape(compatible_shape,
                                                tensor_no_shape)
      self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
      with_future_2x2 = tensor_util.with_shape(
          constant_op.constant(compatible_shape), tensor_no_shape)

      array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
      for tensor_2x2 in [with_present_2x2, with_future_2x2]:
        np.testing.assert_array_equal(array_2x2,
                                      tensor_2x2.eval({
                                          tensor_no_shape: array_2x2
                                      }))
        self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
                                tensor_2x2.eval,
                                {tensor_no_shape: [42.0, 43.0]})
        self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
                                tensor_2x2.eval, {tensor_no_shape: [42.0]})
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def make_placeholder_from_tensor(t, scope=None):
  """Create a `tf.placeholder` for the Graph Editor.

  Note that the correct graph scope must be set by the calling function.

  Args:
    t: a `tf.Tensor` whose name will be used to create the placeholder
      (see function placeholder_name).
    scope: absolute scope within which to create the placeholder. None
      means that the scope of `t` is preserved. `""` means the root scope.
  Returns:
    A newly created `tf.placeholder`.
  Raises:
    TypeError: if `t` is not `None` or a `tf.Tensor`.
  """
  return tf_array_ops.placeholder(
      dtype=t.dtype, shape=t.get_shape(), name=placeholder_name(
          t, scope=scope))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def make_placeholder_from_dtype_and_shape(dtype, shape=None, scope=None):
  """Create a tf.placeholder for the Graph Editor.

  Note that the correct graph scope must be set by the calling function.
  The placeholder is named using the function placeholder_name (with no
  tensor argument).

  Args:
    dtype: the tensor type.
    shape: the tensor shape (optional).
    scope: absolute scope within which to create the placeholder. None
      means that the scope of t is preserved. "" means the root scope.
  Returns:
    A newly created tf.placeholder.
  """
  return tf_array_ops.placeholder(
      dtype=dtype, shape=shape, name=placeholder_name(scope=scope))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testConstructionWithUnknownShapes(self):
    mu = array_ops.placeholder(dtypes.float32)
    sigma = array_ops.placeholder(dtypes.float32)
    obs = array_ops.placeholder(dtypes.float32)
    z = st.ObservedStochasticTensor(
        distributions.Normal(
            loc=mu, scale=sigma), value=obs)

    mu2 = array_ops.placeholder(dtypes.float32, shape=[None])
    sigma2 = array_ops.placeholder(dtypes.float32, shape=[None])
    obs2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
    z2 = st.ObservedStochasticTensor(
        distributions.Normal(
            loc=mu2, scale=sigma2), value=obs2)

    coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
    self.assertEqual(coll, [z, z2])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self):
    weights = 2.3
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._predictions.shape)
    tf_labels = array_ops.placeholder(dtypes.float32, shape=self._labels.shape)
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=tf_predictions,
        labels=tf_labels,
        weights=constant_op.constant(weights))
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          tf_predictions: self._predictions,
                          tf_labels: self._labels,
                      })
      self.assertAlmostEqual(weights * np.sum(self._expected_losses), loss, 3)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders(self):
    weights = np.asarray([1.2, 3.4]).reshape((2, 1))
    expected_losses = np.multiply(weights, self._expected_losses)

    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._predictions.shape)
    tf_labels = array_ops.placeholder(dtypes.int32, shape=self._labels.shape)
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=tf_predictions,
        labels=tf_labels,
        weights=constant_op.constant(
            weights, shape=[2]))

    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          tf_predictions: self._predictions,
                          tf_labels: self._labels,
                      })
      self.assertAlmostEqual(np.sum(expected_losses), loss, 3)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def test_seq2seq_inputs(self):
    inp = np.array([[[1, 0], [0, 1], [1, 0]], [[0, 1], [1, 0], [0, 1]]])
    out = np.array([[[0, 1, 0], [1, 0, 0]], [[1, 0, 0], [0, 1, 0]]])
    with self.test_session() as session:
      x = array_ops.placeholder(dtypes.float32, [2, 3, 2])
      y = array_ops.placeholder(dtypes.float32, [2, 2, 3])
      in_x, in_y, out_y = ops.seq2seq_inputs(x, y, 3, 2)
      enc_inp = session.run(in_x, feed_dict={x.name: inp})
      dec_inp = session.run(in_y, feed_dict={x.name: inp, y.name: out})
      dec_out = session.run(out_y, feed_dict={x.name: inp, y.name: out})
    # Swaps from batch x len x height to list of len of batch x height.
    self.assertAllEqual(enc_inp, np.swapaxes(inp, 0, 1))
    self.assertAllEqual(dec_inp, [[[0, 0, 0], [0, 0, 0]],
                                  [[0, 1, 0], [1, 0, 0]],
                                  [[1, 0, 0], [0, 1, 0]]])
    self.assertAllEqual(dec_out, [[[0, 1, 0], [1, 0, 0]],
                                  [[1, 0, 0], [0, 1, 0]],
                                  [[0, 0, 0], [0, 0, 0]]])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def test_rnn_decoder(self):
    with self.test_session():
      decoder_inputs = [
          array_ops.placeholder(dtypes.float32, [2, 2]) for _ in range(3)
      ]
      encoding = array_ops.placeholder(dtypes.float32, [2, 2])
      cell = core_rnn_cell_impl.GRUCell(2)
      outputs, states, sampling_outputs, sampling_states = (
          ops.rnn_decoder(decoder_inputs, encoding, cell))
      self.assertEqual(len(outputs), 3)
      self.assertEqual(outputs[0].get_shape(), [2, 2])
      self.assertEqual(len(states), 4)
      self.assertEqual(states[0].get_shape(), [2, 2])
      self.assertEqual(len(sampling_outputs), 3)
      self.assertEqual(sampling_outputs[0].get_shape(), [2, 2])
      self.assertEqual(len(sampling_states), 4)
      self.assertEqual(sampling_states[0].get_shape(), [2, 2])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testTensorSignaturePlaceholders(self):
    placeholder_a = array_ops.placeholder(
        name='test', shape=[None, 100], dtype=dtypes.int32)
    signatures = tensor_signature.create_signatures(placeholder_a)
    placeholder_out = tensor_signature.create_placeholders_from_signatures(
        signatures)
    self.assertEqual(placeholder_out.dtype, placeholder_a.dtype)
    self.assertTrue(placeholder_out.get_shape().is_compatible_with(
        placeholder_a.get_shape()))
    self.assertTrue(
        tensor_signature.tensors_compatible(placeholder_out, signatures))

    inputs = {'a': placeholder_a}
    signatures = tensor_signature.create_signatures(inputs)
    placeholders_out = tensor_signature.create_placeholders_from_signatures(
        signatures)
    self.assertEqual(placeholders_out['a'].dtype, placeholder_a.dtype)
    self.assertTrue(placeholders_out['a'].get_shape().is_compatible_with(
        placeholder_a.get_shape()))
    self.assertTrue(
        tensor_signature.tensors_compatible(placeholders_out, signatures))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def create_placeholders_from_signatures(signatures):
  """Creates placeholders from given signatures.

  Args:
    signatures: Dict of `TensorSignature` objects or single `TensorSignature`,
      or `None`.

  Returns:
    Dict of `tf.placeholder` objects or single `tf.placeholder`, or `None`.
  """
  if signatures is None:
    return None
  if not isinstance(signatures, dict):
    return signatures.get_placeholder()
  return {
      key: signatures[key].get_placeholder()
      for key in signatures}
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testExportMonitorInputFeatureKeyNoFeatures(self):
    random.seed(42)
    input_feature_key = 'my_example_key'

    def _serving_input_fn():
      return {
          input_feature_key:
              array_ops.placeholder(
                  dtype=dtypes.string, shape=(1,))
      }, None

    monitor = learn.monitors.ExportMonitor(
        every_n_steps=1,
        export_dir=tempfile.mkdtemp() + 'export/',
        input_fn=_serving_input_fn,
        input_feature_key=input_feature_key,
        exports_to_keep=2,
        signature_fn=export.generic_signature_fn)
    regressor = learn.LinearRegressor(feature_columns=[_X_COLUMN])
    with self.assertRaisesRegexp(KeyError, _X_KEY):
      regressor.fit(input_fn=_training_input_fn, steps=10, monitors=[monitor])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testExportMonitorInputFeature(self):
    random.seed(42)
    input_feature_key = 'my_example_key'

    def _serving_input_fn():
      return {
          input_feature_key:
              array_ops.placeholder(
                  dtype=dtypes.string, shape=(1,)),
          _X_KEY:
              random_ops.random_uniform(
                  shape=(1,), minval=0.0, maxval=1000.0)
      }, None

    export_dir = tempfile.mkdtemp() + 'export/'
    monitor = learn.monitors.ExportMonitor(
        every_n_steps=1,
        export_dir=export_dir,
        input_fn=_serving_input_fn,
        input_feature_key=input_feature_key,
        exports_to_keep=2,
        signature_fn=export.generic_signature_fn)
    regressor = learn.LinearRegressor(feature_columns=[_X_COLUMN])
    regressor.fit(input_fn=_training_input_fn, steps=10, monitors=[monitor])
    self._assert_export(monitor, export_dir, 'generic_signature')
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testDynamicOutputSizeWithRateOneValidPadding(self):
    num_filters = 32
    input_size = [5, 9, 11, 3]
    expected_size = [None, None, None, num_filters]
    expected_size_dynamic = [5, 7, 9, num_filters]

    with self.test_session():
      images = array_ops.placeholder(np.float32,
                                     [None, None, None, input_size[3]])
      output = layers_lib.convolution2d(
          images, num_filters, [3, 3], rate=1, padding='VALID')
      variables_lib.global_variables_initializer().run()
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), expected_size)
      eval_output = output.eval({images: np.zeros(input_size, np.float32)})
      self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self):
    if test.is_gpu_available(cuda_only=True):
      num_filters = 32
      input_size = [5, 3, 9, 11]
      expected_size = [None, num_filters, None, None]
      expected_size_dynamic = [5, num_filters, 7, 9]

      with self.test_session(use_gpu=True):
        images = array_ops.placeholder(np.float32,
                                       [None, input_size[1], None, None])
        output = layers_lib.convolution2d(
            images,
            num_filters, [3, 3],
            rate=1,
            padding='VALID',
            data_format='NCHW')
        variables_lib.global_variables_initializer().run()
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), expected_size)
        eval_output = output.eval({images: np.zeros(input_size, np.float32)})
        self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testDynamicOutputSizeWithRateTwoValidPadding(self):
    num_filters = 32
    input_size = [5, 9, 11, 3]
    expected_size = [None, None, None, num_filters]
    expected_size_dynamic = [5, 5, 7, num_filters]

    with self.test_session():
      images = array_ops.placeholder(np.float32,
                                     [None, None, None, input_size[3]])
      output = layers_lib.convolution2d(
          images, num_filters, [3, 3], rate=2, padding='VALID')
      variables_lib.global_variables_initializer().run()
      self.assertEqual(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), expected_size)
      eval_output = output.eval({images: np.zeros(input_size, np.float32)})
      self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testDynamicOutputSizeWithStrideTwoSamePadding(self):
    num_filters = 32
    input_size = [5, 9, 11, 3]
    expected_size = [None, None, None, num_filters]
    expected_size_dynamic = [5, 18, 22, num_filters]

    with self.test_session():
      images = array_ops.placeholder(np.float32,
                                     [None, None, None, input_size[3]])
      output = layers_lib.conv2d_transpose(
          images, num_filters, [3, 3], stride=[2, 2], padding='SAME')
      variables_lib.global_variables_initializer().run()
      self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
      self.assertListEqual(output.get_shape().as_list(), expected_size)
      eval_output = output.eval({images: np.zeros(input_size, np.float32)})
      self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testHorzConvWithBlankImageAndPlaceholder(self):
    image = array_ops.placeholder(dtypes.float32, shape=(None, None, None, 1))
    horz_gradients = layers_lib.conv2d_in_plane(
        image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()

    with self.test_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients,
                        feed_dict={image: np.ones((1, 10, 10, 1))})
      expected = np.zeros((1, 10, 9, 1))

      self.assertAllEqual(result, expected)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testIncompleteShape(self):
    """Test `_inner_flatten` shape inference for incomplete shapes."""
    shape = [2, None, 4, None, 5, 6]
    inputs = array_ops.placeholder(dtypes.int32)
    inputs.set_shape(shape)

    flattened1 = _layers._inner_flatten(inputs, 1)
    self.assertEqual([None], flattened1.get_shape().as_list())

    flattened2 = _layers._inner_flatten(inputs, 2)
    self.assertEqual([2, None], flattened2.get_shape().as_list())

    flattened3 = _layers._inner_flatten(inputs, 3)
    self.assertEqual([2, None, None], flattened3.get_shape().as_list())

    flattened4 = _layers._inner_flatten(inputs, 4)
    self.assertEqual([2, None, 4, None], flattened4.get_shape().as_list())

    flattened5 = _layers._inner_flatten(inputs, 5)
    self.assertEqual([2, None, 4, None, 30], flattened5.get_shape().as_list())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testConvWithInputsViaPlaceHolder(self):
    height, width = 3, 3
    images_placeholder = array_ops.placeholder(
        dtypes.float32, shape=(None, None, None, 3))
    net = layers_lib.separable_conv2d(
        images_placeholder,
        8, [3, 3],
        2,
        normalizer_fn=_layers.batch_norm,
        normalizer_params={},
        scope='conv1')
    init_op = variables_lib.global_variables_initializer()
    with self.test_session() as sess:
      images = np.random.rand(5, height, width, 3)
      sess.run(init_op)
      sess.run(net, feed_dict={images_placeholder: images})
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testSoftmax3DUnknownSize(self):
    logits = np.ones((2, 3, 2))
    logits[0, 0, 0] = 0
    logits[1, 1, 1] = 0
    logit_placeholder = array_ops.placeholder(
        dtypes.float32, shape=(None, None, 2))
    feed_dict = {logit_placeholder: logits}
    exp_prediction = 0.5 * np.ones((2, 3, 2))
    exp_prediction[0, 0, 0] = self.low
    exp_prediction[0, 0, 1] = self.high
    exp_prediction[1, 1, 0] = self.high
    exp_prediction[1, 1, 1] = self.low

    prediction = _layers.softmax(logit_placeholder)
    with self.test_session() as sess:
      prediction = sess.run(prediction, feed_dict=feed_dict)
      self.assertAllClose(exp_prediction, prediction)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testKnownRankUnknownDimsSucceeds(self):
    height, width = 2, 3

    for dim in range(3):
      placeholder_value = np.ones((height, width, 3))
      shape = [height, width, 3]
      del shape[dim]
      expected = np.ones(shape)

      image = array_ops.placeholder(dtypes.float32, (None, None, 3))
      output = _layers.unit_norm(image, dim=dim, epsilon=1e-6)
      norms = math_ops.sqrt(
          math_ops.reduce_sum(
              math_ops.square(output), reduction_indices=dim))

      with self.test_session():
        actual = norms.eval({image: placeholder_value})
        self.assertAllClose(expected, actual, 1e-4, 1e-4)


# TODO(b/28426988): Add separate tests for non-legacy versions.
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def test_shapes_variable_first_dim(self):
    # first dimension is not known statically.
    x = array_ops.placeholder(dtypes.float32, shape=[None, 4, 3])
    y = _layers.legacy_fully_connected(x, 1)
    # in the output we still only know the 2nd and 3rd dimensions statically.
    self.assertEqual(y.get_shape().as_list(), [None, 4, 1])
    with self.test_session() as sess:
      variables_lib.global_variables_initializer().run()
      # we can feed in input with first dimension 2
      shape_value = sess.run(array_ops.shape(y),
                             feed_dict={x: self.input_3_dim_arr})
      self.assertAllClose(shape_value, [2, 4, 1])
      # we can feed in input with first dimension 1
      shape_value = sess.run(array_ops.shape(y),
                             feed_dict={x: [self.input_3_dim_arr[0]]})
      self.assertAllClose(shape_value, [1, 4, 1])
      # we cannot feed in input with inconsistent dimensions
      with self.assertRaises(ValueError):
        sess.run(array_ops.shape(y), feed_dict={x: [[[]]]})
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testNoGlobalStepWithDecay(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.test_session(graph=g):
        x = array_ops.placeholder(dtypes.float32, [])
        var = variable_scope.get_variable(
            "test", [], initializer=init_ops.constant_initializer(10))
        loss = math_ops.abs(var * x)
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        with self.assertRaisesRegexp(
            ValueError, "global_step is required for learning_rate_decay_fn"):
          optimizers_lib.optimize_loss(
              loss,
              global_step=None,
              learning_rate=0.1,
              learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
              optimizer=optimizer,
              update_ops=[update_op])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def make_place_holder_tensors_for_base_features(feature_columns):
  """Returns placeholder tensors for inference.

  Args:
    feature_columns: An iterable containing all the feature columns. All items
      should be instances of classes derived from _FeatureColumn.
  Returns:
    A dict mapping feature keys to SparseTensors (sparse columns) or
    placeholder Tensors (dense columns).
  """
  # Get dict mapping features to FixedLenFeature or VarLenFeature values.
  dict_for_parse_example = create_feature_spec_for_parsing(feature_columns)
  placeholders = {}
  for column_name, column_type in dict_for_parse_example.items():
    if isinstance(column_type, parsing_ops.VarLenFeature):
      # Sparse placeholder for sparse tensors.
      placeholders[column_name] = array_ops.sparse_placeholder(
          column_type.dtype, name="Placeholder_{}".format(column_name))
    else:
      # Simple placeholder for dense tensors.
      placeholders[column_name] = array_ops.placeholder(
          column_type.dtype,
          shape=(None, column_type.shape[0]),
          name="Placeholder_{}".format(column_name))
  return placeholders
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def test1dWeightedValues_placeholders(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
      values = array_ops.placeholder(dtype=dtypes_lib.float32)

      # Create the queue that populates the weighted labels.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1,))
      _enqueue_vector(sess, weights_queue, 1, shape=(1,))
      _enqueue_vector(sess, weights_queue, 0, shape=(1,))
      _enqueue_vector(sess, weights_queue, 0, shape=(1,))
      _enqueue_vector(sess, weights_queue, 1, shape=(1,))
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean(values, weights)

      variables.local_variables_initializer().run()
      for i in range(4):
        update_op.eval(feed_dict={values: feed_values[i]})
      self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def test2dWeightedValues_placeholders(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
      values = array_ops.placeholder(dtype=dtypes_lib.float32)

      # Create the queue that populates the weighted labels.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(2,))
      _enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
      _enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
      _enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
      _enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean(values, weights)

      variables.local_variables_initializer().run()
      for i in range(4):
        update_op.eval(feed_dict={values: feed_values[i]})
      self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testWeighted1d_placeholders(self):
    predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
    labels = array_ops.placeholder(dtype=dtypes_lib.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.streaming_precision(
        predictions, labels, weights=constant_op.constant([[2], [5]]))

    with self.test_session():
      variables.local_variables_initializer().run()
      weighted_tp = 2.0 + 5.0
      weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(
          expected_precision, update_op.eval(feed_dict=feed_dict))
      self.assertAlmostEqual(
          expected_precision, precision.eval(feed_dict=feed_dict))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testWeighted2d_placeholders(self):
    predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
    labels = array_ops.placeholder(dtype=dtypes_lib.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.streaming_precision(
        predictions,
        labels,
        weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))

    with self.test_session():
      variables.local_variables_initializer().run()
      weighted_tp = 3.0 + 4.0
      weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(
          expected_precision, update_op.eval(feed_dict=feed_dict))
      self.assertAlmostEqual(
          expected_precision, precision.eval(feed_dict=feed_dict))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testStreamingConcat(self):
    with self.test_session() as sess:
      values = array_ops.placeholder(dtypes_lib.int32, [None])
      concatenated, update_op = metrics.streaming_concat(values)
      sess.run(variables.local_variables_initializer())

      self.assertAllEqual([], concatenated.eval())

      sess.run([update_op], feed_dict={values: [0, 1, 2]})
      self.assertAllEqual([0, 1, 2], concatenated.eval())

      sess.run([update_op], feed_dict={values: [3, 4]})
      self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())

      sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
      self.assertAllEqual(np.arange(10), concatenated.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def placeholder(dtype, axes, name=None):
  """Create a placeholder for a labeled tensor.

  For example:

    lt.placeholder(tf.float32, ['batch', ('channel', ['r', 'g', 'b'])])

  See tf.placeholder for more details.

  Args:
    dtype: The type of elements in the tensor to be fed.
    axes: sequence of strings (denoting axes of unknown size) and/or objects
      convertable to lt.Axis to label the result.
    name: Optional op name.

  Returns:
    Placeholder labeled tensor.
  """
  with ops.name_scope(name, 'lt_placeholder', []) as scope:
    axes = core.Axes([(axis, None) if isinstance(axis, string_types) else axis
                      for axis in axes])
    shape = [axis.size for axis in axes.values()]
    tensor = array_ops.placeholder(dtype, shape, name=scope)
    return core.LabeledTensor(tensor, axes)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    shape = list(shape)

    matrix = linear_operator_test_util.random_positive_definite_matrix(shape,
                                                                       dtype)

    if use_placeholder:
      matrix_ph = array_ops.placeholder(dtype=dtype)
      # Evaluate here because (i) you cannot feed a tensor, and (ii)
      # values are random and we want the same value used for both mat and
      # feed_dict.
      matrix = matrix.eval()
      operator = linalg.LinearOperatorMatrix(matrix)
      feed_dict = {matrix_ph: matrix}
    else:
      operator = linalg.LinearOperatorMatrix(matrix)
      feed_dict = None

    # Convert back to Tensor.  Needed if use_placeholder, since then we have
    # already evaluated matrix to a numpy array.
    mat = ops.convert_to_tensor(matrix)

    return operator, mat, feed_dict
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    matrix = linear_operator_test_util.random_normal(shape, dtype=dtype)
    if use_placeholder:
      matrix_ph = array_ops.placeholder(dtype=dtype)
      # Evaluate here because (i) you cannot feed a tensor, and (ii)
      # values are random and we want the same value used for both mat and
      # feed_dict.
      matrix = matrix.eval()
      operator = linalg.LinearOperatorMatrix(matrix)
      feed_dict = {matrix_ph: matrix}
    else:
      operator = linalg.LinearOperatorMatrix(matrix)
      feed_dict = None

    # Convert back to Tensor.  Needed if use_placeholder, since then we have
    # already evaluated matrix to a numpy array.
    mat = ops.convert_to_tensor(matrix)

    return operator, mat, feed_dict