Python tensorflow.python.ops.array_ops module: one_hot() code examples

The following 20 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.one_hot().

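Before the project excerpts, a minimal sketch of the op itself, written against the public tf.one_hot alias (which wraps array_ops.one_hot) and assuming TensorFlow 2.x eager mode:

import tensorflow as tf

# Each integer index becomes a length-`depth` one-hot row; out-of-range
# indices (e.g. -1) produce a row of all off_value entries.
indices = tf.constant([0, 2, 1])
print(tf.one_hot(indices, depth=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]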
Project: Question-Answering    Author: MurtyShikhar    | Project source | File source
def hardmax(logits, name=None):
  """Returns batched one-hot vectors.

  The depth index containing the `1` is that of the maximum logit value.

  Args:
    logits: A batch tensor of logit values.
    name: Name to use when creating ops.
  Returns:
    A batched one-hot tensor.
  """
  with ops.name_scope(name, "Hardmax", [logits]):
    logits = ops.convert_to_tensor(logits, name="logits")
    if logits.get_shape()[-1].value is not None:
      depth = logits.get_shape()[-1].value
    else:
      depth = array_ops.shape(logits)[-1]
    return array_ops.one_hot(
        math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
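
A usage sketch for the hardmax above, rewritten against the public API (TensorFlow 2.x eager mode assumed):

import tensorflow as tf

logits = tf.constant([[0.1, 2.0, 0.3],
                      [4.0, 0.5, 0.6]])
# One-hot at the argmax position, matching hardmax(logits).
print(tf.one_hot(tf.argmax(logits, axis=-1), depth=logits.shape[-1],
                 dtype=logits.dtype))
# [[0. 1. 0.]
#  [0. 0. 1.]]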
Project: deep-learning    Author: lbkchen    | Project source | File source
def _get_eval_ops(self, features, targets, metrics):
    features, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner, training=False,
        **self.construction_args)

    probabilities = graph_builder.inference_graph(features, data_spec=spec)

    # One-hot the labels.
    if not self.params.regression:
      labels = math_ops.to_int64(array_ops.one_hot(math_ops.to_int64(
          array_ops.squeeze(labels)), self.params.num_classes, 1, 0))

    if metrics is None:
      metrics = {self.accuracy_metric:
                 eval_metrics.get_metric(self.accuracy_metric)}

    result = {}
    for name, metric in six.iteritems(metrics):
      result[name] = metric(probabilities, labels)

    return result
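
The label one-hotting step from this snippet in isolation (a sketch with made-up labels; num_classes=2 assumed):

import tensorflow as tf

labels = tf.constant([[1], [0]], dtype=tf.int64)   # shape (2, 1)
# squeeze -> shape (2,), then an integer one-hot with on_value=1, off_value=0.
print(tf.one_hot(tf.squeeze(labels), 2, 1, 0))
# [[0 1]
#  [1 0]]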
Project: LIE    Author: EmbraceLife    | Project source | File source
def one_hot(indices, num_classes):
      """Computes the one-hot representation of an integer tensor.

      Arguments:
          indices: nD integer tensor of shape
              `(batch_size, dim1, dim2, ... dim(n-1))`
          num_classes: Integer, number of classes to consider.

      Returns:
          The (n + 1)D one-hot representation of the input, with shape
          `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
      """
      return array_ops.one_hot(indices, depth=num_classes, axis=-1)
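
For instance (a sketch, TensorFlow 2.x eager mode assumed), a rank-2 batch of class ids gains a trailing class axis:

import tensorflow as tf

ids = tf.constant([[1, 2], [0, 1]])              # shape (2, 2)
print(tf.one_hot(ids, depth=3, axis=-1).shape)   # (2, 2, 3)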
Project: lsdc    Author: febert    | Project source | File source
def one_hot_matrix(tensor_in, num_classes, on_value=1.0, off_value=0.0):
  """Encodes indices from given tensor as one-hot tensor.

  TODO(ilblackdragon): Ideally implementation should be
  part of TensorFlow with Eigen-native operation.

  Args:
    tensor_in: Input tensor of shape [N1, N2].
    num_classes: Number of classes to expand index into.
    on_value: Tensor or float, value to fill-in given index.
    off_value: Tensor or float, value to fill-in everything else.
  Returns:
    Tensor of shape [N1, N2, num_classes] with 1.0 for each id in original
    tensor.
  """
  return array_ops_.one_hot(
      math_ops.cast(tensor_in, dtypes.int64), num_classes, on_value, off_value)
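
The on_value/off_value parameters replace the default 1.0/0.0 fill values; a sketch with deliberately non-default fills (array_ops_ above is simply that project's import alias for array_ops):

import tensorflow as tf

ids = tf.constant([[0, 2], [1, 1]], dtype=tf.int64)
print(tf.one_hot(ids, 3, on_value=5.0, off_value=-1.0))
# [[[ 5. -1. -1.]
#   [-1. -1.  5.]]
#  [[-1.  5. -1.]
#   [-1.  5. -1.]]]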
Project: lsdc    Author: febert    | Project source | File source
def _estimate_data_distribution(labels, num_classes, smoothing_constant=10):
  """Estimate data distribution as labels are seen."""
  # Variable to track running count of classes. Smooth by a nonzero value to
  # avoid division-by-zero. Higher values provide more stability at the cost of
  # slower convergence.
  if smoothing_constant <= 0:
    raise ValueError('smoothing_constant must be positive.')
  num_examples_per_class_seen = variables.Variable(
      initial_value=[smoothing_constant] * num_classes, trainable=False,
      name='class_count', dtype=dtypes.int64)

  # Update the class-count based on what labels are seen in batch.
  num_examples_per_class_seen = num_examples_per_class_seen.assign_add(
      math_ops.reduce_sum(array_ops.one_hot(labels, num_classes,
                                            dtype=dtypes.int64), 0))

  # Normalize count into a probability.
  # NOTE: Without the `+= 0` line below, the test
  # `testMultiThreadedEstimateDataDistribution` fails. The reason is that
  # before this line, `num_examples_per_class_seen` is a Tensor that shares a
  # buffer with an underlying `ref` object. When the `ref` is changed by another
  # thread, `num_examples_per_class_seen` changes as well. Since this can happen
  # in the middle of the normalization computation, we get probabilities that
  # are very far from summing to one. Adding `+= 0` copies the contents of the
  # tensor to a new buffer, which will be consistent from the start to the end
  # of the normalization computation.
  num_examples_per_class_seen += 0
  init_prob_estimate = math_ops.truediv(
      num_examples_per_class_seen,
      math_ops.reduce_sum(num_examples_per_class_seen))

  # Must return float32 (not float64) to agree with downstream `_verify_input`
  # checks.
  return math_ops.cast(init_prob_estimate, dtypes.float32)
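
The class-count update above relies on one_hot followed by a batch-axis sum; in isolation (a sketch, TensorFlow 2.x eager mode assumed):

import tensorflow as tf

labels = tf.constant([0, 2, 2, 1, 2])
# One row per label, summed over the batch axis -> per-class counts.
print(tf.reduce_sum(tf.one_hot(labels, 3, dtype=tf.int64), axis=0))
# [1 1 3]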
Project: lsdc    Author: febert    | Project source | File source
def _get_eval_ops(self, features, targets, metrics):
    features, _, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)
    _assert_float32(features)
    _assert_float32(labels)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner, training=False,
        **self.construction_args)

    probabilities = graph_builder.inference_graph(features, data_spec=spec)

    # One-hot the labels.
    if not self.params.regression:
      labels = math_ops.to_int64(array_ops.one_hot(math_ops.to_int64(
          array_ops.squeeze(labels)), self.params.num_classes, 1, 0))

    if metrics is None:
      metrics = {self.accuracy_metric:
                 eval_metrics.get_metric(self.accuracy_metric)}

    result = {}
    for name, metric in six.iteritems(metrics):
      result[name] = metric(probabilities, labels)

    return result
Project: lsdc    Author: febert    | Project source | File source
def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    if output_rank != 2:
      raise ValueError("BucketizedColumn currently only supports output_rank=2")
    return array_ops.reshape(
        array_ops.one_hot(
            math_ops.to_int64(input_tensor),
            self.length,
            1.,
            0.,
            name="one_hot"), [-1, self.length * self.source_column.dimension],
        name="reshape")
Project: lsdc    Author: febert    | Project source | File source
def one_hot_wrapper(num_classes, loss_fn):
  """Some loss functions take one-hot labels."""
  def _loss(probs, targets):
    one_hot_labels = array_ops.one_hot(
        math_ops.to_int32(targets), num_classes,
        on_value=1., off_value=0., dtype=dtypes.float32)
    return loss_fn(probs, one_hot_labels)
  return _loss
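
What the wrapper hands to loss_fn, in isolation (a sketch with made-up targets; num_classes=3 assumed):

import tensorflow as tf

targets = tf.constant([2, 0])
print(tf.one_hot(targets, 3, on_value=1., off_value=0., dtype=tf.float32))
# [[0. 0. 1.]
#  [1. 0. 0.]]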
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _class_id_labels_to_indicator(labels, num_classes):
  if (num_classes is None) or (num_classes < 2):
    raise ValueError("Invalid num_classes %s." % num_classes)
  with ops.control_dependencies((_assert_labels_rank(labels),)):
    labels = array_ops.reshape(labels, (-1,))
  return array_ops.one_hot(labels, depth=num_classes, axis=-1)
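
The reshape-then-one-hot conversion in isolation (a sketch, TensorFlow 2.x eager mode assumed):

import tensorflow as tf

labels = tf.constant([[1], [0], [2]])   # class ids with a trailing dim
flat = tf.reshape(labels, (-1,))        # rank-1 ids, as the helper requires
print(tf.one_hot(flat, depth=3, axis=-1))
# [[0. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 1.]]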
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def logistic_model_no_mode_fn(features, labels):
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  labels = array_ops.one_hot(labels, 3, 1, 0)
  prediction, loss = (models.logistic_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return {
      'class': math_ops.argmax(prediction, 1),
      'prob': prediction
  }, loss, train_op
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    if output_rank != 2:
      raise ValueError("BucketizedColumn currently only supports output_rank=2")
    return array_ops.reshape(
        array_ops.one_hot(
            math_ops.to_int64(input_tensor),
            self.length,
            1.,
            0.,
            name="one_hot"), [-1, self.length * self.source_column.dimension],
        name="reshape")
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _sample_n(self, n, seed=None):
    n_draws = math_ops.cast(self.n, dtype=dtypes.int32)
    if self.n.get_shape().ndims is not None:
      if self.n.get_shape().ndims != 0:
        raise NotImplementedError(
            "Sample only supported for scalar number of draws.")
    elif self.validate_args:
      is_scalar = check_ops.assert_rank(
          n_draws, 0,
          message="Sample only supported for scalar number of draws.")
      n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
    k = self.event_shape()[0]
    unnormalized_logits = array_ops.reshape(
        math_ops.log(random_ops.random_gamma(
            shape=[n],
            alpha=self.alpha,
            dtype=self.dtype,
            seed=seed)),
        shape=[-1, k])
    draws = random_ops.multinomial(
        logits=unnormalized_logits,
        num_samples=n_draws,
        seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
    x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                            reduction_indices=-2)
    final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
    return array_ops.reshape(x, final_shape)
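
The closing one_hot/reduce_sum pair turns category draws into per-category counts; in isolation (a sketch, TensorFlow 2.x eager mode assumed):

import tensorflow as tf

draws = tf.constant([[0, 2, 2, 1]])   # 4 draws over k=3 categories
# one_hot yields shape (batch, n_draws, k); summing over the draws axis
# (second to last) gives counts, as in _sample_n above.
print(tf.reduce_sum(tf.one_hot(draws, depth=3), axis=-2))
# [[1. 1. 2.]]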
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _sample_n(self, n, seed=None):
    sample_shape = array_ops.concat(([n], array_ops.shape(self.logits)), 0)
    logits = self.logits
    if logits.get_shape().ndims == 2:
      logits_2d = logits
    else:
      logits_2d = array_ops.reshape(logits, [-1, self.event_size])
    samples = random_ops.multinomial(logits_2d, n, seed=seed)
    samples = array_ops.transpose(samples)
    samples = array_ops.one_hot(samples, self.event_size, dtype=self.dtype)
    ret = array_ops.reshape(samples, sample_shape)
    return ret
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _mode(self):
    ret = math_ops.argmax(self.logits, axis=self._batch_rank)
    ret = array_ops.one_hot(ret, self.event_size, dtype=self.dtype)
    ret.set_shape(self.logits.get_shape())
    return ret
Project: lsdc    Author: febert    | Project source | File source
def _get_batch_from_per_class_queues(per_class_queues, probs, batch_size):
  """Generates batches according to per-class-probabilities."""
  num_classes = probs.get_shape().num_elements()
  # Number of examples per class is governed by a multinomial distribution.
  # Note: multinomial takes unnormalized log probabilities for its first
  # argument, of dimension [batch_size, num_classes].
  examples = random_ops.multinomial(
      array_ops.expand_dims(math_ops.log(probs), 0), batch_size)

  # Prepare the data and label batches.
  val_list = []
  label_list = []
  for i in range(num_classes):
    num_examples = math_ops.reduce_sum(
        math_ops.cast(math_ops.equal(examples, i), dtypes.int32))
    tensors = per_class_queues[i].dequeue_many(num_examples)

    # If you enqueue a list with a single tensor, only a single tensor is
    # returned. If you enqueue a list with multiple tensors, then a list is
    # returned. We want to handle both cases, so reduce the case of the single
    # tensor to the case of multiple tensors.
    if not isinstance(tensors, list):
      tensors = [tensors]

    val_list.append(tensors)
    label_list.append(array_ops.ones([num_examples], dtype=dtypes.int32) * i)

  # Create a list of tensor of values. val_list is of dimension
  # [num_classes x len(tensors)]. We want list_batch_vals to be of dimension
  # [len(tensors)].
  num_data = len(val_list[0])
  list_batch_vals = [array_ops.concat(
      0, [val_list[i][j] for i in range(num_classes)]) for j in range(num_data)]

  # Create a tensor of labels.
  batch_labels = array_ops.concat(0, label_list)
  batch_labels.set_shape([batch_size])

  # Debug instrumentation.
  sample_tags = ['stratified_sample/%s/samples_class%i' % (batch_labels.name, i)
                 for i in range(num_classes)]
  logging_ops.scalar_summary(sample_tags, math_ops.reduce_sum(
      array_ops.one_hot(batch_labels, num_classes), 0))

  return list_batch_vals, batch_labels
Project: lsdc    Author: febert    | Project source | File source
def to_dnn_input_layer(self,
                         transformed_input_tensor,
                         unused_weight_collections=None,
                         unused_trainable=False):
    """Returns a Tensor as an input to the first layer of neural network.

    Args:
      transformed_input_tensor: A tensor that has undergone the transformations
        in `insert_transformed_feature`.
      unused_weight_collections: Unused. One-hot encodings are not variable.
      unused_trainable: Unused. One-hot encodings are not trainable.

    Returns:
      A multi-hot Tensor to be fed into the first layer of neural network.

    Raises:
      ValueError: When using one_hot_column with weighted_sparse_column.
        This is not yet supported.
    """

    if (self.sparse_id_column.weight_tensor(transformed_input_tensor) is
        not None):
      raise ValueError("one_hot_column does not yet support "
                       "weighted_sparse_column. Column: {}".format(self))

    dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
        self.sparse_id_column.id_tensor(transformed_input_tensor),
        default_value=-1)

    check_shape_op = control_flow_ops.Assert(
        math_ops.equal(array_ops.rank(dense_id_tensor), 2),
        ["Tensor should be of shape: [batch, max num multivalent values]"])
    with ops.control_dependencies([check_shape_op]):
      # One hot must be float for tf.concat reasons since all other inputs to
      # input_layer are float32.
      one_hot_id_tensor = array_ops.one_hot(
          dense_id_tensor, depth=self.length, on_value=1.0, off_value=0.0)

    # Reduce to get a multi-hot per example.
    return math_ops.reduce_sum(one_hot_id_tensor, reduction_indices=[1])

  # pylint: disable=unused-argument
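
The one-hot-then-sum pattern yields a multi-hot encoding; in isolation (a sketch where -1 stands in for the default_value padding above):

import tensorflow as tf

dense_ids = tf.constant([[0, 2, -1],
                         [1, 1, -1]])   # -1 marks missing values
one_hot = tf.one_hot(dense_ids, depth=3, on_value=1.0, off_value=0.0)
print(tf.reduce_sum(one_hot, axis=1))
# [[1. 0. 1.]
#  [0. 2. 0.]]   repeated ids accumulate; -1 entries contribute nothing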
Project: lsdc    Author: febert    | Project source | File source
def _to_dnn_input_layer(self,
                          transformed_input_tensor,
                          unused_weight_collections=None,
                          unused_trainable=False,
                          output_rank=2):
    """Returns a Tensor as an input to the first layer of neural network.

    Args:
      transformed_input_tensor: A tensor that has undergone the transformations
        in `insert_transformed_feature`. Rank should be >= `output_rank`.
      unused_weight_collections: Unused. One-hot encodings are not variable.
      unused_trainable: Unused. One-hot encodings are not trainable.
      output_rank: The desired rank of the output `Tensor`.

    Returns:
      A multi-hot Tensor to be fed into the first layer of neural network.

    Raises:
      ValueError: When using one_hot_column with weighted_sparse_column.
        This is not yet supported.
    """

    if (self.sparse_id_column.weight_tensor(transformed_input_tensor) is
        not None):
      raise ValueError("one_hot_column does not yet support "
                       "weighted_sparse_column. Column: {}".format(self))

    # Reshape ID column to `output_rank`.
    sparse_id_column = self.sparse_id_column.id_tensor(transformed_input_tensor)
    # pylint: disable=protected-access
    sparse_id_column = layers._inner_flatten(sparse_id_column, output_rank)

    dense_id_tensor = sparse_ops.sparse_tensor_to_dense(sparse_id_column,
                                                        default_value=-1)

    # One hot must be float for tf.concat reasons since all other inputs to
    # input_layer are float32.
    one_hot_id_tensor = array_ops.one_hot(
        dense_id_tensor, depth=self.length, on_value=1.0, off_value=0.0)

    # Reduce to get a multi-hot per example.
    return math_ops.reduce_sum(
        one_hot_id_tensor, reduction_indices=[output_rank - 1])
Project: lsdc    Author: febert    | Project source | File source
def _inverse(self, y):
    # To derive the inverse mapping note that:
    #   y[i] = exp(x[i]) / normalization
    # and
    #   y[end] = 1 / normalization.
    # Thus:
    # x[i] = log(exp(x[i])) - log(y[end]) - log(normalization)
    #      = log(exp(x[i])/normalization) - log(y[end])
    #      = log(y[i]) - log(y[end])
    shape = (np.asarray(y.get_shape().as_list(), dtype=np.int32)
             if y.get_shape().is_fully_defined()
             else array_ops.shape(y, name="shape"))
    ndims = y.get_shape().ndims or math_ops.rank(y, name="ndims")

    # Do this first to make sure CSE catches that it'll happen again in
    # _inverse_log_det_jacobian.
    x = math_ops.log(y)

    # We now extract the last coordinate of the rightmost dimension.
    # Our trick is to slice from [0,0,...,shape[-1]-1] to shape[:-1]+[1].
    begin = array_ops.one_hot(indices=ndims-1,
                              depth=ndims,
                              on_value=shape[-1]-np.array(1, dtype=shape.dtype),
                              dtype=shape.dtype)
    size = array_ops.concat(0, (shape[:-1], np.asarray([1], dtype=shape.dtype)))
    log_normalization = -array_ops.slice(x, begin, size)

    # Here we slice out all but the last coordinate; see above for idea.
    begin = array_ops.zeros_like(shape)
    size = array_ops.concat(0, (shape[:-1], [shape[-1]-1]))
    x = array_ops.slice(x, begin, size)

    x += log_normalization

    if self._static_event_ndims == 0:
      x = array_ops.squeeze(x, squeeze_dims=[ndims-1])

    # Set shape hints.
    if y.get_shape().ndims is not None:
      shape = y.get_shape().as_list()
      if self._static_event_ndims == 0:
        shape = shape[:-1]
      elif shape[-1] is not None:
        shape[-1] -= 1
      shape = tensor_shape.TensorShape(shape)
      x.get_shape().assert_is_compatible_with(shape)
      x.set_shape(shape)

    return x
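
The begin trick in isolation: one_hot builds an index vector that is zero everywhere except the last position (a sketch with concrete numbers; rank 3 and a last axis of size 4 assumed):

import tensorflow as tf

ndims, last = 3, 4
# Places last-1 at position ndims-1, zeros elsewhere: the slice start
# [0, 0, shape[-1]-1] used above.
print(tf.one_hot(indices=ndims - 1, depth=ndims, on_value=last - 1,
                 off_value=0, dtype=tf.int32))
# [0 0 3]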
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _to_dnn_input_layer(self,
                          transformed_input_tensor,
                          unused_weight_collections=None,
                          unused_trainable=False,
                          output_rank=2):
    """Returns a Tensor as an input to the first layer of neural network.

    Args:
      transformed_input_tensor: A tensor that has undergone the transformations
        in `insert_transformed_feature`. Rank should be >= `output_rank`.
      unused_weight_collections: Unused. One-hot encodings are not variable.
      unused_trainable: Unused. One-hot encodings are not trainable.
      output_rank: The desired rank of the output `Tensor`.

    Returns:
      A multi-hot Tensor to be fed into the first layer of neural network.

    Raises:
      ValueError: When using one_hot_column with weighted_sparse_column.
        This is not yet supported.
    """

    # Reshape ID column to `output_rank`.
    sparse_id_column = self.sparse_id_column.id_tensor(transformed_input_tensor)
    # pylint: disable=protected-access
    sparse_id_column = layers._inner_flatten(sparse_id_column, output_rank)

    weight_tensor = self.sparse_id_column.weight_tensor(
        transformed_input_tensor)
    if weight_tensor is not None:
      weighted_column = sparse_ops.sparse_merge(sp_ids=sparse_id_column,
                                                sp_values=weight_tensor,
                                                vocab_size=self.length)
      return sparse_ops.sparse_tensor_to_dense(weighted_column)

    dense_id_tensor = sparse_ops.sparse_tensor_to_dense(sparse_id_column,
                                                        default_value=-1)

    # One hot must be float for tf.concat reasons since all other inputs to
    # input_layer are float32.
    one_hot_id_tensor = array_ops.one_hot(
        dense_id_tensor, depth=self.length, on_value=1.0, off_value=0.0)

    # Reduce to get a multi-hot per example.
    return math_ops.reduce_sum(
        one_hot_id_tensor, reduction_indices=[output_rank - 1])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _inverse(self, y):
    # To derive the inverse mapping note that:
    #   y[i] = exp(x[i]) / normalization
    # and
    #   y[end] = 1 / normalization.
    # Thus:
    # x[i] = log(exp(x[i])) - log(y[end]) - log(normalization)
    #      = log(exp(x[i])/normalization) - log(y[end])
    #      = log(y[i]) - log(y[end])
    shape = (np.asarray(y.get_shape().as_list(), dtype=np.int32)
             if y.get_shape().is_fully_defined()
             else array_ops.shape(y, name="shape"))
    ndims = y.get_shape().ndims or math_ops.rank(y, name="ndims")

    # Do this first to make sure CSE catches that it'll happen again in
    # _inverse_log_det_jacobian.
    x = math_ops.log(y)

    # We now extract the last coordinate of the rightmost dimension.
    # Our trick is to slice from [0,0,...,shape[-1]-1] to shape[:-1]+[1].
    begin = array_ops.one_hot(indices=ndims-1,
                              depth=ndims,
                              on_value=shape[-1]-np.array(1, dtype=shape.dtype),
                              dtype=shape.dtype)
    size = array_ops.concat((shape[:-1], np.asarray([1], dtype=shape.dtype)), 0)
    log_normalization = -array_ops.strided_slice(x, begin, begin + size)

    # Here we slice out all but the last coordinate; see above for idea.
    begin = array_ops.zeros_like(shape)
    size = array_ops.concat((shape[:-1], [shape[-1] - 1]), 0)
    x = array_ops.strided_slice(x, begin, begin + size)

    x += log_normalization

    if self._static_event_ndims == 0:
      x = array_ops.squeeze(x, squeeze_dims=[ndims-1])

    # Set shape hints.
    if y.get_shape().ndims is not None:
      shape = y.get_shape().as_list()
      if self._static_event_ndims == 0:
        shape = shape[:-1]
      elif shape[-1] is not None:
        shape[-1] -= 1
      shape = tensor_shape.TensorShape(shape)
      x.get_shape().assert_is_compatible_with(shape)
      x.set_shape(shape)

    return x