Python tensorflow.python.ops.array_ops module: transpose() example source code

We extracted the following 49 code examples from open-source Python projects to illustrate how to use tensorflow.python.ops.array_ops.transpose().
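
Before the project examples, here is a minimal orientation sketch of the op itself (TF 1.x graph mode is assumed, and the tensor values are illustrative, not taken from any project below). `array_ops.transpose` is the internal symbol behind the public `tf.transpose`:

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.constant([[[1, 2], [3, 4]]])                 # shape: (1, 2, 2)
# Without `perm`, the dimension order is simply reversed.
rev = array_ops.transpose(x)                        # shape: (2, 2, 1)
# With `perm`, dimensions are reordered explicitly, e.g. swap the first two axes.
swapped = array_ops.transpose(x, perm=[1, 0, 2])    # shape: (2, 1, 2)

with tf.Session() as sess:
    print(sess.run(swapped).shape)                  # (2, 1, 2)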

Project: tensorflow_end2end_speech_recognition    Author: hirofumi0810
def _transpose_batch_time(x):
    """Transpose the batch and time dimensions of a Tensor.
    Retains as much of the static shape information as possible.
    Args:
        x: A tensor of rank 2 or higher.
    Returns:
        x transposed along the first two dimensions.
    Raises:
        ValueError: if `x` is rank 1 or lower.
    """
    x_static_shape = x.get_shape()
    if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
        raise ValueError(
            "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
            (x, x_static_shape))
    x_rank = array_ops.rank(x)
    x_t = array_ops.transpose(
        x, array_ops.concat(
            ([1, 0], math_ops.range(2, x_rank)), axis=0))
    x_t.set_shape(
        tensor_shape.TensorShape([
            x_static_shape[1].value, x_static_shape[0].value
        ]).concatenate(x_static_shape[2:]))
    return x_t
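
A hedged usage sketch of the helper above (assuming TF 1.x and an illustrative batch-major input, not part of the original project): it converts a [batch, time, depth] tensor into the time-major [time, batch, depth] layout that dynamic decoding loops typically expect.

# Hypothetical usage:
batch_major = tf.placeholder(tf.float32, shape=[None, 20, 128])  # [batch, time, depth]
time_major = _transpose_batch_time(batch_major)                  # static shape: (20, ?, 128)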
Project: LIE    Author: EmbraceLife
def _preprocess_conv2d_input(x, data_format):
      """Transpose and cast the input before the conv2d.

      Arguments:
          x: input tensor.
          data_format: string, one of 'channels_last', 'channels_first'.

      Returns:
          A tensor.
      """
      if dtype(x) == 'float64':
        x = math_ops.cast(x, 'float32')
      if data_format == 'channels_first':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        x = array_ops.transpose(x, (0, 2, 3, 1))
      return x
Project: LIE    Author: EmbraceLife
def _postprocess_conv2d_output(x, data_format):
      """Transpose and cast the output from conv2d if needed.

      Arguments:
          x: A tensor.
          data_format: string, one of "channels_last", "channels_first".

      Returns:
          A tensor.
      """

      if data_format == 'channels_first':
        x = array_ops.transpose(x, (0, 3, 1, 2))

      if floatx() == 'float64':
        x = math_ops.cast(x, 'float64')
      return x
Project: LIE    Author: EmbraceLife
def _postprocess_conv3d_output(x, data_format):
      """Transpose and cast the output from conv3d if needed.

      Arguments:
          x: A tensor.
          data_format: string, one of "channels_last", "channels_first".

      Returns:
          A tensor.
      """
      if data_format == 'channels_first':
        x = array_ops.transpose(x, (0, 4, 1, 2, 3))

      if floatx() == 'float64':
        x = math_ops.cast(x, 'float64')
      return x
Project: lsdc    Author: febert
def _sample_n(self, n, seed=None):
    # Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
    shape = array_ops.concat(0, [self._cov.vector_shape(), [n]])
    white_samples = random_ops.random_normal(shape=shape,
                                             mean=0,
                                             stddev=1,
                                             dtype=self.dtype,
                                             seed=seed)

    correlated_samples = self._cov.sqrt_matmul(white_samples)

    # Move the last dimension to the front
    perm = array_ops.concat(0, (
        array_ops.pack([array_ops.rank(correlated_samples) - 1]),
        math_ops.range(0, array_ops.rank(correlated_samples) - 1)))

    # TODO(ebrevdo): Once we get a proper tensor contraction op,
    # perform the inner product using that instead of batch_matmul
    # and this slow transpose can go away!
    correlated_samples = array_ops.transpose(correlated_samples, perm)
    samples = correlated_samples + self.mu
    return samples
Project: lsdc    Author: febert
def impose_axis_order(labeled_tensor, axis_order=None, name=None):
  """Impose desired axis order on a labeled tensor.

  Args:
    labeled_tensor: The input tensor.
    axis_order: Optional desired axis order, as a list of names. If not
      provided, defaults to the current axis_order_scope (if set).
    name: Optional op name.

  Returns:
    Labeled tensor with possibly transposed axes.

  Raises:
    AxisOrderError: If no axis_order is provided or axis_order does not contain
      all axes on the input tensor.
  """
  with ops.name_scope(name, 'lt_impose_axis_order', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)

    if axis_order is None:
      axis_order = _get_valid_axis_order()

    relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes]

    return transpose(labeled_tensor, relevant_axis_order, name=scope)
Project: diversity_based_attention    Author: PrekshaNema25
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u, g = array_ops.split(1, 3, _linear([inputs, state],
                                             3 * self._num_units, True, 1.0))
        r, u, g = sigmoid(r), sigmoid(u), sigmoid(g)
      with vs.variable_scope("Candidate"):
        c = self._activation(_linear([inputs, r * state],
                                     self._num_units, True))
      new_h = u * state + (1 - u) * c

      eps = 1e-13
      temp = math_ops.div(math_ops.reduce_sum(math_ops.mul(new_h, state),1), \
                          math_ops.reduce_sum(math_ops.mul(state,state),1) + eps)

      m = array_ops.transpose(g)

      t1 = math_ops.mul(m , temp)
      t1 = array_ops.transpose(t1) 

      distract_h = new_h  -  state * t1
    return distract_h, distract_h
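
The transpose / multiply / transpose pattern above is a TF 1.x idiom for scaling each row of a [batch, units] tensor by a per-example scalar of shape [batch]: broadcasting aligns trailing dimensions, so the [batch] vector is multiplied against the transposed [units, batch] tensor and the result is transposed back. A minimal standalone sketch with illustrative values (not part of the original cell):

import tensorflow as tf

h = tf.constant([[1., 2., 3.], [4., 5., 6.]])    # [batch=2, units=3]
scale = tf.constant([10., 100.])                 # [batch=2], one scalar per example
# Transpose to [units, batch] so `scale` broadcasts across rows, then transpose back;
# this is equivalent to h * scale[:, None].
scaled = tf.transpose(tf.transpose(h) * scale)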
Project: polyaxon    Author: polyaxon
def transpose_batch_time(x):
    """Transpose the batch and time dimensions of a Tensor.

    Retains as much of the static shape information as possible.

    Args:
        x: A tensor of rank 2 or higher.

    Returns:
        x transposed along the first two dimensions.

    Raises:
        ValueError: if `x` is rank 1 or lower.
    """
    x_static_shape = x.get_shape()
    if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
        raise ValueError(
            "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
            (x, x_static_shape))
    x_rank = array_ops.rank(x)
    x_t = array_ops.transpose(x, array_ops.concat(([1, 0], math_ops.range(2, x_rank)), axis=0))
    x_t.set_shape(tf.TensorShape([
        x_static_shape[1].value, x_static_shape[0].value]).concatenate(x_static_shape[2:]))
    return x_t
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _compute_euclidean_distance(cls, inputs, clusters):
    """Computes Euclidean distance between each input and each cluster center.

    Args:
      inputs: list of input Tensors.
      clusters: cluster Tensor.

    Returns:
      list of Tensors, where each element corresponds to each element in inputs.
      The value is the distance of each row to all the cluster centers.
    """
    output = []
    for inp in inputs:
      with ops.colocate_with(inp):
        # Computes Euclidean distance. Note the first and third terms are
        # broadcast additions.
        squared_distance = (math_ops.reduce_sum(
            math_ops.square(inp), 1, keep_dims=True) - 2 * math_ops.matmul(
                inp, clusters, transpose_b=True) + array_ops.transpose(
                    math_ops.reduce_sum(
                        math_ops.square(clusters), 1, keep_dims=True)))
        output.append(squared_distance)

    return output
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _define_full_covariance_probs(self, shard_id, shard):
    """Defines the full covariance probabilties per example in a class.

    Updates a matrix with dimension num_examples X num_classes.

    Args:
      shard_id: id of the current shard.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    diff = shard - self._means
    cholesky = linalg_ops.cholesky(self._covs + self._min_var)
    log_det_covs = 2.0 * math_ops.reduce_sum(
        math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
    x_mu_cov = math_ops.square(
        linalg_ops.matrix_triangular_solve(
            cholesky, array_ops.transpose(
                diff, perm=[0, 2, 1]), lower=True))
    diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
    self._probs[shard_id] = -0.5 * (diag_m + math_ops.to_float(self._dimensions)
                                    * math_ops.log(2 * np.pi) + log_det_covs)
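
For reference, the block above evaluates the multivariate normal log-density through a Cholesky factorization \Sigma_k = L_k L_k^\top:

\log p(x \mid \mu_k, \Sigma_k) = -\tfrac{1}{2}\Big[\, \lVert L_k^{-1}(x-\mu_k)\rVert_2^2 + d\log(2\pi) + 2\textstyle\sum_i \log (L_k)_{ii} \,\Big]

where the squared triangular solve corresponds to `x_mu_cov`/`diag_m`, `d` to `self._dimensions`, and the last term to `log_det_covs`.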
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _define_diag_covariance_probs(self, shard_id, shard):
    """Defines the diagonal covariance probabilities per example in a class.

    Args:
      shard_id: id of the current shard.
      shard: current data shard, 1 X num_examples X dimensions.

    Returns a matrix num_examples * num_classes.
    """
    # num_classes X 1
    # TODO(xavigonzalvo): look into alternatives to log for
    # reparametrization of variance parameters.
    det_expanded = math_ops.reduce_sum(
        math_ops.log(self._covs + 1e-3), 1, keep_dims=True)
    diff = shard - self._means
    x2 = math_ops.square(diff)
    cov_expanded = array_ops.expand_dims(1.0 / (self._covs + 1e-3), 2)
    # num_classes X num_examples
    x2_cov = math_ops.matmul(x2, cov_expanded)
    x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
    self._probs[shard_id] = -0.5 * (
        math_ops.to_float(self._dimensions) * math_ops.log(2.0 * np.pi) +
        array_ops.transpose(det_expanded) + x2_cov)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _define_partial_maximization_operation(self, shard_id, shard):
    """Computes the partial statistics of the means and covariances.

    Args:
      shard_id: current shard id.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    # Soft assignment of each data point to each of the two clusters.
    self._points_in_k[shard_id] = math_ops.reduce_sum(
        self._w[shard_id], 0, keep_dims=True)
    # Partial means.
    w_mul_x = array_ops.expand_dims(
        math_ops.matmul(
            self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
        1)
    self._w_mul_x.append(w_mul_x)
    # Partial covariances.
    x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
    x_trans = array_ops.transpose(x, perm=[0, 2, 1])
    x_mul_w = array_ops.concat([
        array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
        for k in range(self._num_classes)
    ], 0)
    self._w_mul_x2.append(math_ops.matmul(x_mul_w, x))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _sample_n(self, n, seed=None):
    n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
    if self.total_count.get_shape().ndims is not None:
      if self.total_count.get_shape().ndims != 0:
        raise NotImplementedError(
            "Sample only supported for scalar number of draws.")
    elif self.validate_args:
      is_scalar = check_ops.assert_rank(
          n_draws, 0,
          message="Sample only supported for scalar number of draws.")
      n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
    k = self.event_shape()[0]
    # Flatten batch dims so logits has shape [B, k],
    # where B = reduce_prod(self.batch_shape()).
    draws = random_ops.multinomial(
        logits=array_ops.reshape(self.logits, [-1, k]),
        num_samples=n * n_draws,
        seed=seed)
    draws = array_ops.reshape(draws, shape=[-1, n, n_draws])
    x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                            reduction_indices=-2)  # shape: [B, n, k]
    x = array_ops.transpose(x, perm=[1, 0, 2])
    final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
    return array_ops.reshape(x, final_shape)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _sample_n(self, n, seed=None):
    # Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
    shape = array_ops.concat([self._cov.vector_shape(), [n]], 0)
    white_samples = random_ops.random_normal(shape=shape,
                                             mean=0.,
                                             stddev=1.,
                                             dtype=self.dtype,
                                             seed=seed)

    correlated_samples = self._cov.sqrt_matmul(white_samples)

    # Move the last dimension to the front
    perm = array_ops.concat(
        (array_ops.stack([array_ops.rank(correlated_samples) - 1]),
         math_ops.range(0, array_ops.rank(correlated_samples) - 1)), 0)

    # TODO(ebrevdo): Once we get a proper tensor contraction op,
    # perform the inner product using that instead of batch_matmul
    # and this slow transpose can go away!
    correlated_samples = array_ops.transpose(correlated_samples, perm)
    samples = correlated_samples + self.mu
    return samples
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def sequence_to_images(tensor, num_image_batches):
  """Convert a batch of sequences into a batch of images.

  Args:
    tensor: (num_steps, num_batches, depth) sequence tensor
    num_image_batches: the number of image batches

  Returns:
    (num_images, height, width, depth) tensor
  """

  width, num_batches, depth = _shape(tensor)
  height = num_batches // num_image_batches
  reshaped = array_ops.reshape(tensor,
                               [width, num_image_batches, height, depth])
  return array_ops.transpose(reshaped, [1, 2, 0, 3])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def separable_lstm(images, num_filters_out, nhidden=None, scope=None):
  """Run bidirectional LSTMs first horizontally then vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    nhidden: hidden layer depth
    scope: optional scope name

  Returns:
    (num_images, height, width, num_filters_out) tensor
  """
  with variable_scope.variable_scope(scope, "SeparableLstm", [images]):
    if nhidden is None:
      nhidden = num_filters_out
    hidden = horizontal_lstm(images, nhidden)
    with variable_scope.variable_scope("vertical"):
      transposed = array_ops.transpose(hidden, [0, 2, 1, 3])
      output_transposed = horizontal_lstm(transposed, num_filters_out)
    output = array_ops.transpose(output_transposed, [0, 2, 1, 3])
    return output
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def reduce_to_sequence(images, num_filters_out, scope=None):
  """Reduce an image to a sequence by scanning an LSTM vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    scope: optional scope name

  Returns:
    A (width, num_images, num_filters_out) sequence.
  """
  with variable_scope.variable_scope(scope, "ReduceToSequence", [images]):
    batch_size, height, width, depth = _shape(images)
    transposed = array_ops.transpose(images, [1, 0, 2, 3])
    reshaped = array_ops.reshape(transposed,
                                 [height, batch_size * width, depth])
    reduced = lstm1d.sequence_to_final(reshaped, num_filters_out)
    output = array_ops.reshape(reduced, [batch_size, width, num_filters_out])
    return output
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testTranspose(self):
    for dtype in self.numeric_types:
      self._testBinary(
          array_ops.transpose,
          np.zeros(shape=[1, 0, 4], dtype=dtype),
          np.array([1, 2, 0], dtype=np.int32),
          expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
      self._testBinary(
          array_ops.transpose,
          np.array([[1, 2], [3, 4]], dtype=dtype),
          np.array([0, 1], dtype=np.int32),
          expected=np.array([[1, 2], [3, 4]], dtype=dtype))
      self._testBinary(
          array_ops.transpose,
          np.array([[1, 2], [3, 4]], dtype=dtype),
          np.array([1, 0], dtype=np.int32),
          expected=np.array([[1, 3], [2, 4]], dtype=dtype))
Project: seq2seq    Author: google
def _transpose_batch_time(x):
  """Transpose the batch and time dimensions of a Tensor.

  Retains as much of the static shape information as possible.

  Args:
    x: A tensor of rank 2 or higher.

  Returns:
    x transposed along the first two dimensions.

  Raises:
    ValueError: if `x` is rank 1 or lower.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
    raise ValueError(
        "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
        (x, x_static_shape))
  x_rank = array_ops.rank(x)
  x_t = array_ops.transpose(
      x, array_ops.concat(
          ([1, 0], math_ops.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tensor_shape.TensorShape([
          x_static_shape[1].value, x_static_shape[0].value
      ]).concatenate(x_static_shape[2:]))
  return x_t
Project: conv_seq2seq    Author: tobyyouup
def _transpose_batch_time(x):
  """Transpose the batch and time dimensions of a Tensor.

  Retains as much of the static shape information as possible.

  Args:
    x: A tensor of rank 2 or higher.

  Returns:
    x transposed along the first two dimensions.

  Raises:
    ValueError: if `x` is rank 1 or lower.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
    raise ValueError(
        "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
        (x, x_static_shape))
  x_rank = array_ops.rank(x)
  x_t = array_ops.transpose(
      x, array_ops.concat(
          ([1, 0], math_ops.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tensor_shape.TensorShape([
          x_static_shape[1].value, x_static_shape[0].value
      ]).concatenate(x_static_shape[2:]))
  return x_t
Project: LIE    Author: EmbraceLife
def transpose(x):
      """Transposes a tensor and returns it.

      Arguments:
          x: Tensor or variable.

      Returns:
          A tensor.

      Examples:
      ```python
          >>> var = K.variable([[1, 2, 3], [4, 5, 6]])
          >>> K.eval(var)
          array([[ 1.,  2.,  3.],
                 [ 4.,  5.,  6.]], dtype=float32)
          >>> var_transposed = K.transpose(var)
          >>> K.eval(var_transposed)
          array([[ 1.,  4.],
                 [ 2.,  5.],
                 [ 3.,  6.]], dtype=float32)
      ```

      ```python
          >>> input = K.placeholder((2, 3))
          >>> input
          <tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32>
          >>> input_transposed = K.transpose(input)
          >>> input_transposed
          <tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32>
      ```
      """
      return array_ops.transpose(x)

Project: LIE    Author: EmbraceLife
def permute_dimensions(x, pattern):
      """Permutes axes in a tensor.

      Arguments:
          x: Tensor or variable.
          pattern: A tuple of
              dimension indices, e.g. `(0, 2, 1)`.

      Returns:
          A tensor.
      """
      return array_ops.transpose(x, perm=pattern)
Project: LIE    Author: EmbraceLife
def _preprocess_conv3d_input(x, data_format):
      """Transpose and cast the input before the conv3d.

      Arguments:
          x: input tensor.
          data_format: string, one of 'channels_last', 'channels_first'.

      Returns:
          A tensor.
      """
      if dtype(x) == 'float64':
        x = math_ops.cast(x, 'float32')
      if data_format == 'channels_first':
        x = array_ops.transpose(x, (0, 2, 3, 4, 1))
      return x
Project: LIE    Author: EmbraceLife
def _preprocess_conv2d_kernel(kernel, data_format):
      """Transpose and cast the kernel before the conv2d.

      Arguments:
          kernel: kernel tensor.
          data_format: string, one of 'channels_last', 'channels_first'.

      Returns:
          A tensor.
      """
      if dtype(kernel) == 'float64':
        kernel = math_ops.cast(kernel, 'float32')
      if data_format == 'channels_first':
        kernel = array_ops.transpose(kernel, (2, 3, 1, 0))
      return kernel
Project: LIE    Author: EmbraceLife
def convert_dense_weights_data_format(dense,
                                              previous_feature_map_shape,
                                              target_data_format='channels_first'):
          """Utility useful when changing a convnet's `data_format`.

          When porting the weights of a convnet from one data format to the other,
          if the convnet includes a `Flatten` layer
          (applied to the last convolutional feature map)
          followed by a `Dense` layer, the weights of that `Dense` layer
          should be updated to reflect the new dimension ordering.

          Arguments:
              dense: The target `Dense` layer.
              previous_feature_map_shape: A shape tuple of 3 integers,
                  e.g. `(512, 7, 7)`. The shape of the convolutional
                  feature map right before the `Flatten` layer that
                  came before the target `Dense` layer.
              target_data_format: One of "channels_last", "channels_first".
                  Set it "channels_last"
                  if converting a "channels_first" model to "channels_last",
                  or reciprocally.
          """
          assert target_data_format in {'channels_last', 'channels_first'}
          kernel, bias = dense.get_weights()
          for i in range(kernel.shape[1]):
            if target_data_format == 'channels_first':
              c, h, w = previous_feature_map_shape
              original_fm_shape = (h, w, c)
              ki = kernel[:, i].reshape(original_fm_shape)
              ki = np.transpose(ki, (2, 0, 1))  # last -> first
            else:
              h, w, c = previous_feature_map_shape
              original_fm_shape = (c, h, w)
              ki = kernel[:, i].reshape(original_fm_shape)
              ki = np.transpose(ki, (1, 2, 0))  # first -> last
            kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
          dense.set_weights([kernel, bias])
Project: LIE    Author: EmbraceLife
def img_to_array(img, data_format=None):
          """Converts a PIL Image instance to a Numpy array.

          Arguments:
              img: PIL Image instance.
              data_format: Image data format.

          Returns:
              A 3D Numpy array.

          Raises:
              ValueError: if invalid `img` or `data_format` is passed.
          """
          if data_format is None:
            data_format = K.image_data_format()
          if data_format not in {'channels_first', 'channels_last'}:
            raise ValueError('Unknown data_format: ', data_format)
          # Numpy array x has format (height, width, channel)
          # or (channel, height, width)
          # but original PIL image has format (width, height, channel)
          x = np.asarray(img, dtype=K.floatx())
          if len(x.shape) == 3:
            if data_format == 'channels_first':
              x = x.transpose(2, 0, 1)
          elif len(x.shape) == 2:
            if data_format == 'channels_first':
              x = x.reshape((1, x.shape[0], x.shape[1]))
            else:
              x = x.reshape((x.shape[0], x.shape[1], 1))
          else:
            raise ValueError('Unsupported image shape: ', x.shape)
          return x

        # load image file into array with a given target_size rather than original size of image
Project: LIE    Author: EmbraceLife
def load_data(label_mode='fine'):
          """Loads CIFAR100 dataset.

          Arguments:
              label_mode: one of "fine", "coarse".

          Returns:
              Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

          Raises:
              ValueError: in case of invalid `label_mode`.
          """
          if label_mode not in ['fine', 'coarse']:
            raise ValueError('label_mode must be one of "fine" "coarse".')

          dirname = 'cifar-100-python'
          origin = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
          path = get_file(dirname, origin=origin, untar=True)

          fpath = os.path.join(path, 'train')
          x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels')

          fpath = os.path.join(path, 'test')
          x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels')

          y_train = np.reshape(y_train, (len(y_train), 1))
          y_test = np.reshape(y_test, (len(y_test), 1))

          if K.image_data_format() == 'channels_last':
            x_train = x_train.transpose(0, 2, 3, 1)
            x_test = x_test.transpose(0, 2, 3, 1)

          return (x_train, y_train), (x_test, y_test)
Project: lsdc    Author: febert
def to_sparse_tensor(self, input_tensor):
    """Creates a SparseTensor from the bucketized Tensor."""
    dimension = self.source_column.dimension
    batch_size = array_ops.shape(input_tensor, name="shape")[0]

    if dimension > 1:
      i1 = array_ops.reshape(
          array_ops.tile(
              array_ops.expand_dims(
                  math_ops.range(0, batch_size), 1, name="expand_dims"),
              [1, dimension],
              name="tile"), [-1],
          name="rehsape")
      i2 = array_ops.tile(
          math_ops.range(0, dimension), [batch_size], name="tile")
      # Flatten the bucket indices and unique them across dimensions
      # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
      bucket_indices = array_ops.reshape(
          input_tensor, [-1], name="reshape") + self.length * i2
    else:
      # Simpler indices when dimension=1
      i1 = math_ops.range(0, batch_size)
      i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
      bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")

    indices = math_ops.to_int64(array_ops.transpose(array_ops.pack((i1, i2))))
    shape = math_ops.to_int64(array_ops.pack([batch_size, dimension]))
    sparse_id_values = ops.SparseTensor(indices, bucket_indices, shape)

    return sparse_id_values
Project: lsdc    Author: febert
def _flip_matrix_to_vector_static(mat, static_batch_shape):
  """Flip matrix to vector with static shapes."""
  mat_rank = mat.get_shape().ndims
  k = mat.get_shape()[-2]
  final_shape = static_batch_shape.concatenate(k)

  # mat.shape = matrix_batch_shape + [k, M]
  # Permutation corresponding to [M] + matrix_batch_shape + [k]
  perm = [mat_rank - 1] + list(range(0, mat_rank - 1))
  mat_with_end_at_beginning = array_ops.transpose(mat, perm=perm)
  vector = array_ops.reshape(mat_with_end_at_beginning, final_shape)
  return vector
Project: lsdc    Author: febert
def _flip_matrix_to_vector_dynamic(mat, batch_shape):
  """Flip matrix to vector with dynamic shapes."""
  mat_rank = array_ops.rank(mat)
  k = array_ops.gather(array_ops.shape(mat), mat_rank - 2)
  final_shape = array_ops.concat(0, (batch_shape, [k]))

  # mat.shape = matrix_batch_shape + [k, M]
  # Permutation corresponding to [M] + matrix_batch_shape + [k]
  perm = array_ops.concat(
      0, ([mat_rank - 1], math_ops.range(0, mat_rank - 1)))
  mat_with_end_at_beginning = array_ops.transpose(mat, perm=perm)
  vector = array_ops.reshape(mat_with_end_at_beginning, final_shape)
  return vector
Project: lsdc    Author: febert
def _flip_vector_to_matrix_dynamic(vec, batch_shape):
  """flip_vector_to_matrix with dynamic shapes."""
  # Shapes associated with batch_shape
  batch_rank = array_ops.size(batch_shape)

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = array_ops.shape(vec)
  vec_rank = array_ops.rank(vec)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = array_ops.slice(vec_shape, [0], [m])
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
  k = array_ops.gather(vec_shape, vec_rank - 1)
  new_shape = array_ops.concat(0, (batch_shape, [k], condensed_shape))

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat(
        0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
    return array_ops.transpose(vec, perm=perm)

  x_flipped = control_flow_ops.cond(
      math_ops.less(0, m),
      _flip_front_dims_to_back,
      lambda: array_ops.expand_dims(vec, -1))

  return array_ops.reshape(x_flipped, new_shape)
Project: lsdc    Author: febert
def _sample_n(self, n, seed=None):
    logits_2d = array_ops.reshape(
        self.logits, array_ops.pack([-1, self.num_classes]))
    samples = random_ops.multinomial(logits_2d, n, seed=seed)
    samples = math_ops.cast(samples, self.dtype)
    ret = array_ops.reshape(
        array_ops.transpose(samples),
        array_ops.concat(0, ([n], self.batch_shape())))
    return ret
Project: lsdc    Author: febert
def to_sparse_tensor(self, input_tensor):
    """Creates a SparseTensor from the bucketized Tensor."""
    dimension = self.source_column.dimension
    batch_size = array_ops.shape(input_tensor, name="shape")[0]

    if dimension > 1:
      i1 = array_ops.reshape(
          array_ops.tile(
              array_ops.expand_dims(
                  math_ops.range(0, batch_size), 1, name="expand_dims"),
              [1, dimension],
              name="tile"), [-1],
          name="rehsape")
      i2 = array_ops.tile(
          math_ops.range(0, dimension), [batch_size], name="tile")
      # Flatten the bucket indices and unique them across dimensions
      # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
      bucket_indices = array_ops.reshape(
          input_tensor, [-1], name="reshape") + self.length * i2
    else:
      # Simpler indices when dimension=1
      i1 = math_ops.range(0, batch_size)
      i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
      bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")

    indices = math_ops.to_int64(array_ops.transpose(array_ops.pack((i1, i2))))
    shape = math_ops.to_int64(array_ops.pack([batch_size, dimension]))
    sparse_id_values = sparse_tensor_py.SparseTensor(
        indices, bucket_indices, shape)

    return sparse_id_values
Project: lsdc    Author: febert
def transpose(labeled_tensor, axis_order=None, name=None):
  """Permute a tensor's axes.

  See tf.transpose.

  Args:
    labeled_tensor: The input tensor.
    axis_order: Optional desired axis order, as a list of names. By default, the
      order of axes is reversed.
    name: Optional op name.

  Returns:
    The permuted tensor.

  Raises:
    ValueError: If axis_order isn't a permutation of the existing axes.
  """
  with ops.name_scope(name, 'lt_transpose', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)

    original_order = list(labeled_tensor.axes.keys())
    if axis_order is None:
      axis_order = list(reversed(original_order))
    elif sorted(axis_order) != sorted(original_order):
      raise ValueError(
          'The new axis order must have the same names as the original axes, '
          'but the new order is %r while the original order is %r' %
          (axis_order, original_order))

    axis_names = list(labeled_tensor.axes.keys())
    permutation = [axis_names.index(n) for n in axis_order]

    # Note: TensorFlow doesn't copy data for the identity tranpose.
    transpose_tensor = array_ops.transpose(labeled_tensor.tensor,
                                           permutation,
                                           name=scope)

    permuted_axes = [labeled_tensor.axes[n] for n in axis_order]

    return LabeledTensor(transpose_tensor, permuted_axes)
Project: lsdc    Author: febert
def _flip_matrix_to_vector_static(mat, static_batch_shape):
  """Flip matrix to vector with static shapes."""
  mat_rank = mat.get_shape().ndims
  k = mat.get_shape()[-2]
  final_shape = static_batch_shape.concatenate(k)

  # mat.shape = matrix_batch_shape + [k, M]
  # Permutation corresponding to [M] + matrix_batch_shape + [k]
  perm = [mat_rank - 1] + list(range(0, mat_rank - 1))
  mat_with_end_at_beginning = array_ops.transpose(mat, perm=perm)
  vector = array_ops.reshape(mat_with_end_at_beginning, final_shape)
  return vector
Project: lsdc    Author: febert
def _flip_matrix_to_vector_dynamic(mat, batch_shape):
  """Flip matrix to vector with dynamic shapes."""
  mat_rank = array_ops.rank(mat)
  k = array_ops.gather(array_ops.shape(mat), mat_rank - 2)
  final_shape = array_ops.concat(0, (batch_shape, [k]))

  # mat.shape = matrix_batch_shape + [k, M]
  # Permutation corresponding to [M] + matrix_batch_shape + [k]
  perm = array_ops.concat(
      0, ([mat_rank - 1], math_ops.range(0, mat_rank - 1)))
  mat_with_end_at_beginning = array_ops.transpose(mat, perm=perm)
  vector = array_ops.reshape(mat_with_end_at_beginning, final_shape)
  return vector
Project: lsdc    Author: febert
def _flip_vector_to_matrix_dynamic(vec, batch_shape):
  """flip_vector_to_matrix with dynamic shapes."""
  # Shapes associated with batch_shape
  batch_rank = array_ops.size(batch_shape)

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = array_ops.shape(vec)
  vec_rank = array_ops.rank(vec)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = array_ops.slice(vec_shape, [0], [m])
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
  k = array_ops.gather(vec_shape, vec_rank - 1)
  new_shape = array_ops.concat(0, (batch_shape, [k], condensed_shape))

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat(
        0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
    return array_ops.transpose(vec, perm=perm)

  x_flipped = control_flow_ops.cond(
      math_ops.less(0, m),
      _flip_front_dims_to_back,
      lambda: array_ops.expand_dims(vec, -1))

  return array_ops.reshape(x_flipped, new_shape)
Project: lsdc    Author: febert
def _flip_vector_to_matrix_static(vec, batch_shape):
  """flip_vector_to_matrix with static shapes."""
  # Shapes associated with batch_shape
  batch_rank = batch_shape.ndims

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = vec.get_shape()
  vec_rank = len(vec_shape)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = vec_shape[:m]
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [np.prod(vec_shape_left)]
  k = vec_shape[-1]
  new_shape = batch_shape.concatenate(k).concatenate(condensed_shape)

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat(
        0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
    return array_ops.transpose(vec, perm=perm)

  if 0 < m:
    x_flipped = _flip_front_dims_to_back()
  else:
    x_flipped = array_ops.expand_dims(vec, -1)

  return array_ops.reshape(x_flipped, new_shape)
Project: lsdc    Author: febert
def _sample_n(self, n, seed=None):
    if self.logits.get_shape().ndims == 2:
      logits_2d = self.logits
    else:
      logits_2d = array_ops.reshape(self.logits, [-1, self.num_classes])
    samples = random_ops.multinomial(logits_2d, n, seed=seed)
    samples = math_ops.cast(samples, self.dtype)
    ret = array_ops.reshape(
        array_ops.transpose(samples),
        array_ops.concat(0, ([n], self.batch_shape())))
    return ret
Project: sequencing    Author: SwordYork
def _transpose_batch_time(x):
    """Transpose the batch and time dimensions of a Tensor.

    Retains as much of the static shape information as possible.

    Args:
      x: A tensor of rank 2 or higher.

    Returns:
      x transposed along the first two dimensions.

    Raises:
      ValueError: if `x` is rank 1 or lower.
    """
    x_static_shape = x.get_shape()
    if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
        raise ValueError(
            "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
            (x, x_static_shape))
    x_rank = array_ops.rank(x)
    x_t = array_ops.transpose(
        x, array_ops.concat(
            ([1, 0], math_ops.range(2, x_rank)), axis=0))
    x_t.set_shape(
        tensor_shape.TensorShape([
            x_static_shape[1].value, x_static_shape[0].value
        ]).concatenate(x_static_shape[2:]))
    return x_t
Project: automatic-summarization    Author: mozilla
def _transpose_batch_time(x):
  """Transpose the batch and time dimensions of a Tensor.

  Retains as much of the static shape information as possible.

  Args:
    x: A tensor of rank 2 or higher.

  Returns:
    x transposed along the first two dimensions.

  Raises:
    ValueError: if `x` is rank 1 or lower.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
    raise ValueError(
        "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
        (x, x_static_shape))
  x_rank = array_ops.rank(x)
  x_t = array_ops.transpose(
      x, array_ops.concat(
          ([1, 0], math_ops.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tensor_shape.TensorShape([
          x_static_shape[1].value, x_static_shape[0].value
      ]).concatenate(x_static_shape[2:]))
  return x_t
Project: diversity_based_attention    Author: PrekshaNema25
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"

      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = array_ops.split(1, 2, _linear([inputs, state],
                                  2 * self._num_units, True, 1.0))
        r, u = sigmoid(r), sigmoid(u)

      with vs.variable_scope("Candidate"):
        c = self._activation(_linear([inputs, r * state],
                                     self._num_units, True))

      new_h = u * state + (1 - u) * c

      eps = 1e-13
      temp = math_ops.div(math_ops.reduce_sum(math_ops.mul(new_h, state), 1), \
                          math_ops.reduce_sum(math_ops.mul(state,state), 1) + eps)

      dummy = array_ops.transpose(state)

      t1 = math_ops.mul(dummy, temp)
      t1 = array_ops.transpose(t1)

      distract_h = new_h  -  state * t1

    return distract_h, distract_h
Project: diversity_based_attention    Author: PrekshaNema25
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__): 

      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(1, 2, state)
      concat = _linear([inputs, h], 5 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate, g= distract_gate
      i, j, f, o, g = array_ops.split(1, 5, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))


      eps = 1e-13
      temp = math_ops.div(math_ops.reduce_sum(math_ops.mul(c, new_c),1),math_ops.reduce_sum(math_ops.mul(c,c),1) + eps)

      m = array_ops.transpose(sigmoid(g))
      t1 = math_ops.mul(m , temp)
      t1 = array_ops.transpose(t1) 

      distract_c = new_c  -  c * t1

      new_h = self._activation(distract_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(1, [new_c, new_h])

      return new_h, new_state
Project: diversity_based_attention    Author: PrekshaNema25
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(1, 2, state)
      concat = _linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))


      eps = 1e-13
      temp = math_ops.div(math_ops.reduce_sum(math_ops.mul(c, new_c),1),math_ops.reduce_sum(math_ops.mul(c,c),1) + eps)

      dummy = array_ops.transpose(c)

      t1 = math_ops.mul(dummy, temp)
      t1 = array_ops.transpose(t1) 
      distract_c = new_c  -  t1

      new_h = self._activation(distract_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(1, [new_c, new_h])
      return new_h, new_state
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testOutputSizeRandomSizesAndStridesValidPadding(self):
    np.random.seed(0)
    max_image_size = 10

    for _ in range(10):
      num_filters = 1
      input_size = [
          1, np.random.randint(1, max_image_size),
          np.random.randint(1, max_image_size), 1
      ]
      filter_size = [
          np.random.randint(1, input_size[1] + 1),
          np.random.randint(1, input_size[2] + 1)
      ]
      stride = [np.random.randint(1, 3), np.random.randint(1, 3)]

      ops.reset_default_graph()
      graph = ops.Graph()
      with graph.as_default():
        images = random_ops.random_uniform(input_size, seed=1)
        transpose = layers_lib.conv2d_transpose(
            images, num_filters, filter_size, stride=stride, padding='VALID')
        conv = layers_lib.conv2d(
            transpose, num_filters, filter_size, stride=stride, padding='VALID')

        with self.test_session(graph=graph) as sess:
          sess.run(variables_lib.global_variables_initializer())
          self.assertListEqual(list(conv.eval().shape), input_size)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _runBatchNormalizationWithFormat(self, shape, data_format, is_training):
    channels = shape[-1]
    with self.test_session() as sess:
      images = np.arange(np.product(shape), dtype=np.float32).reshape(shape)
      beta = init_ops.constant_initializer(
          np.arange(
              2, channels + 2, dtype=np.float32))
      gamma = init_ops.constant_initializer(
          np.arange(
              10, channels + 10, dtype=np.float32) * 2.0)
      mean = init_ops.constant_initializer(
          np.arange(
              3, channels + 3, dtype=np.float32) * 5.0)
      variance = init_ops.constant_initializer(
          np.arange(
              1, channels + 1, dtype=np.float32) * 4.0)
      if data_format == 'NCHW':
        # Reshape inputs from NHWC to NCHW format.
        images = array_ops.transpose(
            images, [0, len(shape) - 1] + list(range(1, len(shape) - 1)))
      output = _layers.batch_norm(
          images,
          is_training=is_training,
          scale=True,
          epsilon=0.5,
          param_initializers={
              'beta': beta,
              'gamma': gamma,
              'moving_mean': mean,
              'moving_variance': variance,
          },
          data_format=data_format)
      if data_format == 'NCHW':
        # Reshape outputs from NCHW back to NHWC format.
        output = array_ops.transpose(output,
                                     [0] + list(range(2, len(shape))) + [1])
      sess.run(variables_lib.global_variables_initializer())
      return sess.run(output)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def to_sparse_tensor(self, input_tensor):
    """Creates a SparseTensor from the bucketized Tensor."""
    dimension = self.source_column.dimension
    batch_size = array_ops.shape(input_tensor, name="shape")[0]

    if dimension > 1:
      i1 = array_ops.reshape(
          array_ops.tile(
              array_ops.expand_dims(
                  math_ops.range(0, batch_size), 1, name="expand_dims"),
              [1, dimension],
              name="tile"), [-1],
          name="rehsape")
      i2 = array_ops.tile(
          math_ops.range(0, dimension), [batch_size], name="tile")
      # Flatten the bucket indices and unique them across dimensions
      # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
      bucket_indices = array_ops.reshape(
          input_tensor, [-1], name="reshape") + self.length * i2
    else:
      # Simpler indices when dimension=1
      i1 = math_ops.range(0, batch_size)
      i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
      bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")

    indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
    shape = math_ops.to_int64(array_ops.stack([batch_size, dimension]))
    sparse_id_values = sparse_tensor_py.SparseTensor(
        indices, bucket_indices, shape)

    return sparse_id_values
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def test_name(self):
    transpose_lt = core.transpose(self.original_lt,
                                  self.original_lt.axes.keys())
    self.assertIn('lt_transpose', transpose_lt.name)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def test_identity(self):
    transpose_lt = core.transpose(self.original_lt,
                                  self.original_lt.axes.keys())
    golden_lt = self.original_lt

    self.assertLabeledTensorsEqual(transpose_lt, golden_lt)