Python tensorflow module: DType() example source code

We have extracted the following 17 code examples from open-source Python projects to illustrate how to use tensorflow.DType().
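Before diving into the project snippets, here is a minimal orientation sketch (assuming TensorFlow 1.x, matching the snippets below) of the tf.DType properties they rely on: normalization via tf.as_dtype, the is_floating flag, and the as_numpy_dtype bridge to NumPy.

import numpy as np
import tensorflow as tf

# Strings, NumPy dtypes, and tf.DType instances all normalize via tf.as_dtype.
dt = tf.as_dtype('float32')              # -> tf.float32, a tf.DType instance
assert isinstance(dt, tf.DType)

print(dt.name)                           # 'float32'
print(dt.is_floating)                    # True
print(dt.as_numpy_dtype is np.float32)   # True

# base_dtype strips the "_ref" flavor used by variables.
print(tf.float32_ref.base_dtype == tf.float32)  # True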

Project: fold    Author: tensorflow
def __new__(cls, dtype=None, shape=None, tag='', tensor=None):
    if tensor is not None:
      if dtype is not None:
        raise TypeError('Specify only one of tensor and dtype.')
      if shape is not None:
        raise TypeError('Specify only one of tensor and shape.')
      dtype = tensor.dtype
      shape = tensor.get_shape().as_list()
    elif not (isinstance(dtype, tf.DType) or
              isinstance(dtype, six.string_types)):
      raise TypeError('%r is not a tf.DType or string' % (dtype,))
    dtype = tf.as_dtype(dtype).base_dtype.name
    if not all(isinstance(s, numbers.Integral) and s >= 0 for s in shape):
      raise TypeError('shape must be non-negative integers: %s' % shape)
    shape = tuple(int(s) for s in shape)
    if not isinstance(tag, six.string_types):
      raise TypeError('A TypeShape tag must be a string; type of %r is %s' %
                      (tag, type(tag)))
    return _TypeShape.__new__(cls, dtype, shape, tag)
Project: GPflow    Author: GPflow
def normalize_num_type(num_type):
    """
    Work out what a sensible type for the array is. if the default type
    is float32, downcast 64bit float to float32. For ints, assume int32
    """
    if isinstance(num_type, tf.DType):
        num_type = num_type.as_numpy_dtype.type

    if num_type in [np.float32, np.float64]:  # pylint: disable=E1101
        num_type = settings.float_type
    elif num_type in [np.int16, np.int32, np.int64]:
        num_type = settings.int_type
    else:
        raise ValueError('Unknown dtype "{0}" passed to normalizer.'.format(num_type))

    return num_type


# def types_array(tensor, shape=None):
#     shape = shape if shape is not None else tensor.shape.as_list()
#     return np.full(shape, tensor.dtype).tolist()
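A hedged usage sketch of normalize_num_type; the `settings` object below is a stand-in for GPflow's settings module (its real configuration may differ). Only NumPy dtype inputs are shown here.

import numpy as np

class _FakeSettings(object):  # stand-in for GPflow's settings (assumption)
    float_type = np.float32
    int_type = np.int32

settings = _FakeSettings()  # normalize_num_type looks this up at call time

# Floats collapse to the configured float type, ints to the int type.
print(normalize_num_type(np.float64))   # <class 'numpy.float32'>
print(normalize_num_type(np.int64))     # <class 'numpy.int32'>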
Project: DLTK    Author: DLTK
def __init__(self, read_fn, dtypes):
        """Constructs a Reader instance

        Args:
            read_fn: Input function returning features which is a dictionary of
                string feature name to `Tensor` or `SparseTensor`. If it
                returns a tuple, first item is extracted as features.
                Prediction continues until `input_fn` raises an end-of-input
                exception (`OutOfRangeError` or `StopIteration`).
            dtypes:  A nested structure of tf.DType objects corresponding to
                each component of an element yielded by generator.

        """
        self.dtypes = dtypes

        self.read_fn = read_fn
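A hedged construction sketch, assuming the enclosing class is named Reader (as the docstring suggests) and using a toy read_fn:

import tensorflow as tf

def toy_read_fn():  # hypothetical generator-style read_fn for illustration
    for i in range(3):
        yield {'x': [float(i)], 'y': i}

reader = Reader(read_fn=toy_read_fn,
                dtypes={'x': tf.float32, 'y': tf.int32})
print(reader.dtypes)  # {'x': tf.float32, 'y': tf.int32}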
Project: fold    Author: tensorflow
def __init__(self, shape, dtype='float32'):
    """Creates a tensor type.

    Args:
      shape: A tuple or list of non-negative integers.
      dtype: A `tf.DType`, or stringified version thereof (e.g. `'int64'`).

    Raises:
      TypeError: If `shape` is not a tuple or list of non-negative integers.
      TypeError: If `dtype` cannot be converted to a TF dtype.
    """
    if not isinstance(shape, (tuple, list)):
      raise TypeError('shape must be a tuple or list: %s' % str(shape))
    self._type_shape = loom.TypeShape(dtype, shape)
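A hedged usage sketch, assuming this __init__ belongs to TensorFlow Fold's td.TensorType; the shape/dtype attribute names below are taken from Fold's public API, and the printed forms are illustrative.

import tensorflow_fold as td

t = td.TensorType((3, 4), dtype='int64')  # tuple/list shape, string dtype
print(t.shape)   # (3, 4)
print(t.dtype)   # 'int64'

# A non-integer shape raises TypeError, per the validation above:
# td.TensorType('bad shape')  -> TypeError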
Project: XMUNMT    Author: XMUNLP
def layer_norm(inputs, epsilon=1e-6, dtype=None, scope=None):
    """ Layer Normalization

    Args:
        inputs: A Tensor of shape [..., channel_size]
        epsilon: A floating number
        dtype: An optional instance of tf.DType
        scope: An optional string

    Returns:
            A Tensor with the same shape as inputs
    """
    with tf.variable_scope(scope, default_name="layer_norm", values=[inputs],
                           dtype=dtype):
        channel_size = inputs.get_shape().as_list()[-1]

        scale = tf.get_variable("scale", shape=[channel_size],
                                initializer=tf.ones_initializer())

        offset = tf.get_variable("offset", shape=[channel_size],
                                 initializer=tf.zeros_initializer())

        mean = tf.reduce_mean(inputs, axis=-1, keep_dims=True)
        variance = tf.reduce_mean(tf.square(inputs - mean), axis=-1,
                                  keep_dims=True)

        norm_inputs = (inputs - mean) * tf.rsqrt(variance + epsilon)

        return norm_inputs * scale + offset
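A minimal usage sketch (TensorFlow 1.x graph mode assumed):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 10, 64])
y = layer_norm(x, scope="ln")  # normalizes over the last (channel) axis

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(y, {x: np.random.randn(2, 10, 64).astype(np.float32)})
    print(out.shape)          # (2, 10, 64)
    print(out.mean(axis=-1))  # ~0 per position (scale=1, offset=0 initially)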
Project: hart    Author: akosiorek
def __init__(self, img_size, in_memory=True, dtype=np.float32, **kwargs):

        super(ImageStore, self).__init__(**kwargs)
        self.img_size = img_size
        self.in_memory = in_memory

        if isinstance(dtype, tf.DType):
            dtype = getattr(np, dtype.name)

        self.dtype = dtype
        self.lock = Lock()
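The getattr(np, dtype.name) trick above maps a tf.DType to its NumPy counterpart by name; a standalone sketch:

import numpy as np
import tensorflow as tf

dtype = tf.float32
if isinstance(dtype, tf.DType):
    dtype = getattr(np, dtype.name)  # 'float32' -> np.float32

print(dtype is np.float32)  # True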
Project: hart    Author: akosiorek
def __init__(self, data_dict, n_timesteps, img_size, batch_size, overlap_fraction=.5,
                 sample_objects=False, num_epochs=None, shuffle=True,
                 which_seqs=None, n_threads=3, in_memory=False, depth_folder=None,
                 storage_dtype=tf.float32, mirror=False, reverse=False, bbox_scale=1., name='',
                 deplete_queues_at_length_increase=True):

        assert isinstance(storage_dtype, tf.DType)

        self.data_dict = data_dict
        self.img_size = img_size
        self.batch_size = batch_size
        self.overlap_fraction = overlap_fraction
        self.sample_objects = sample_objects
        self.n_threads = n_threads
        self.in_memory = in_memory
        self.depth_folder = depth_folder
        self.storage_dtype = storage_dtype
        self.mirror = mirror
        self.reverse = reverse
        self.bbox_scale = bbox_scale
        self.name = name
        self.deplete_queues_at_length_increase = deplete_queues_at_length_increase

        if which_seqs is not None:
            self._filter_seqs(which_seqs)

        super(KittiStore, self).__init__(self.data_dict, num_epochs, shuffle)

        self.set_length(n_timesteps)
Project: nengo_dl    Author: nengo
def cast_dtype(dtype, target):
    """Changes float dtypes to the target dtype, leaves others unchanged.

    Used to map all float values to a target precision.  Also casts numpy
    dtypes to TensorFlow dtypes.

    Parameters
    ----------
    dtype : ``tf.DType`` or :class:`~numpy:numpy.dtype`
        Input dtype to be converted
    target : ``tf.DType``
        Floating point dtype to which all floating types should be converted

    Returns
    -------
    ``tf.DType``
        Input dtype, converted to ``target`` type if necessary
    """

    if not isinstance(dtype, tf.DType):
        dtype = tf.as_dtype(dtype)

    if dtype.is_floating:
        dtype = target

    return dtype
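A quick usage sketch:

import numpy as np
import tensorflow as tf

# Float dtypes (NumPy or TF) are retargeted; integer dtypes pass through.
print(cast_dtype(np.float64, tf.float32))  # <dtype: 'float32'>
print(cast_dtype(tf.float16, tf.float32))  # <dtype: 'float32'>
print(cast_dtype(np.int32, tf.float32))    # <dtype: 'int32'>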
Project: odin    Author: imito
def _check_dtype(dtype):
  if hasattr(dtype, '__call__'):
    return functionable(dtype)
  # ====== check dtype ====== #
  if dtype is None:
    dtype = K.floatX
  elif isinstance(dtype, np.dtype) or is_string(dtype):
    dtype = str(dtype)
  elif isinstance(dtype, VariableDesc):
    dtype = dtype.dtype
  elif isinstance(dtype, tf.DType):
    dtype = dtype.base_dtype.name
  return dtype
Project: THUMT    Author: thumt
def layer_norm(inputs, epsilon=1e-6, dtype=None, scope=None):
    """
    Layer Normalization
    :param inputs: A Tensor of shape [..., channel_size]
    :param epsilon: A floating number
    :param dtype: An optional instance of tf.DType
    :param scope: An optional string
    :returns: A Tensor with the same shape as inputs
    """
    with tf.variable_scope(scope, default_name="layer_norm", values=[inputs],
                           dtype=dtype):
        channel_size = inputs.get_shape().as_list()[-1]

        scale = tf.get_variable("scale", shape=[channel_size],
                                initializer=tf.ones_initializer())

        offset = tf.get_variable("offset", shape=[channel_size],
                                 initializer=tf.zeros_initializer())

        mean = tf.reduce_mean(inputs, axis=-1, keep_dims=True)
        variance = tf.reduce_mean(tf.square(inputs - mean), axis=-1,
                                  keep_dims=True)

        norm_inputs = (inputs - mean) * tf.rsqrt(variance + epsilon)

        return norm_inputs * scale + offset
Project: fold    Author: tensorflow
def convert_to_type(type_like):
  """Converts `type_like` to a `Type`.

  If `type_like` is already a `Type`, it is returned. The following
  conversions are performed:

  * Python tuples become `Tuple`s; items are recursively converted.

  * A `tf.TensorShape` becomes a corresponding `TensorType` with
  `dtype=float32`. Must be fully defined.

  * Lists of `shape + [dtype]` (e.g. `[3, 4, 'int32']`) become
  `TensorType`s, with the default `dtype=float32` if omitted.

  * A `tf.DType` or stringified version thereof (e.g. `'int64'`)
  becomes a corresponding scalar `TensorType((), dtype)`.

  * An integer `vector_len` becomes a corresponding vector
  `TensorType((vector_len,), dtype=float32)`.

  Args:
    type_like: Described above.

  Returns:
    A `Type`.

  Raises:
    TypeError: If `type_like` cannot be converted to a `Type`.

  """
  if isinstance(type_like, ResultType):
    return type_like
  if isinstance(type_like, tf.TensorShape):
    # Check this *before* calling as_list() otherwise it throws.
    if not type_like.is_fully_defined():
      raise TypeError('shape %s is not fully defined' % type_like)
    return TensorType(type_like.as_list())
  if isinstance(type_like, tuple):
    return TupleType(convert_to_type(item) for item in type_like)
  if isinstance(type_like, list):
    if type_like and isinstance(type_like[-1], six.string_types):
      return TensorType(type_like[:-1], dtype=type_like[-1])
    else:
      return TensorType(type_like)
  if isinstance(type_like, tf.DType) or isinstance(type_like, six.string_types):
    return TensorType((), dtype=type_like)
  if isinstance(type_like, numbers.Integral):
    return TensorType((type_like,))
  raise TypeError('Cannot convert %s to a type.' % (type_like,))
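A sketch of the conversion rules, calling convert_to_type as defined above; it must run inside Fold's module, where ResultType, TensorType, and TupleType are defined, and the printed reprs are illustrative.

import tensorflow as tf

print(convert_to_type(5))                       # TensorType((5,), 'float32')
print(convert_to_type('int64'))                 # TensorType((), 'int64')
print(convert_to_type([3, 4, 'int32']))         # TensorType((3, 4), 'int32')
print(convert_to_type([3, 4]))                  # TensorType((3, 4), 'float32')
print(convert_to_type(tf.TensorShape([2, 2])))  # TensorType((2, 2), 'float32')
print(convert_to_type(('int32', 5)))            # TupleType of two TensorTypes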
Project: XMUNMT    Author: XMUNLP
def linear(inputs, output_size, bias, concat=False, dtype=None, scope=None):
    """
    Linear layer

    Args:
        inputs: A Tensor or a list of Tensors with shape [batch, input_size]
        output_size: An integer specify the output size
        bias: a boolean value indicate whether to use bias term
        concat: a boolean value indicate whether to concatenate all inputs
        dtype: an instance of tf.DType, the default value is ``tf.float32''
        scope: the scope of this layer, the default value is ``linear''

    Returns:
         a Tensor with shape [batch, output_size]

    Raises:
        RuntimeError: raises ``RuntimeError'' when input sizes do not
                      compatible with each other
    """

    with tf.variable_scope(scope, default_name="linear", values=[inputs]):
        if not isinstance(inputs, (list, tuple)):
            inputs = [inputs]

        input_size = [item.get_shape()[-1].value for item in inputs]

        if len(inputs) != len(input_size):
            raise RuntimeError("inputs and input_size unmatched!")

        output_shape = tf.concat([tf.shape(inputs[0])[:-1], [output_size]],
                                 axis=0)
        # Flatten to 2D
        inputs = [tf.reshape(inp, [-1, inp.shape[-1].value]) for inp in inputs]

        results = []

        if concat:
            input_size = sum(input_size)
            inputs = tf.concat(inputs, 1)

            shape = [input_size, output_size]
            matrix = tf.get_variable("matrix", shape, dtype=dtype)
            results.append(tf.matmul(inputs, matrix))
        else:
            for i in range(len(input_size)):
                shape = [input_size[i], output_size]
                name = "matrix_%d" % i
                matrix = tf.get_variable(name, shape, dtype=dtype)
                results.append(tf.matmul(inputs[i], matrix))

        output = tf.add_n(results)

        if bias:
            shape = [output_size]
            bias = tf.get_variable("bias", shape, dtype=dtype)
            output = tf.nn.bias_add(output, bias)

        output = tf.reshape(output, output_shape)

        return output
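A usage sketch (TensorFlow 1.x): projecting two inputs of different widths into one output space, with separate weight matrices (concat=False):

import numpy as np
import tensorflow as tf

a = tf.placeholder(tf.float32, [None, 16])
b = tf.placeholder(tf.float32, [None, 32])
y = linear([a, b], 8, bias=True, concat=False, scope="proj")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(y, {a: np.ones((4, 16), np.float32),
                       b: np.ones((4, 32), np.float32)})
    print(out.shape)  # (4, 8)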
Project: XMUNMT    Author: XMUNLP
def attention(query, memories, bias, hidden_size, cache=None, reuse=None,
              dtype=None, scope=None):
    """ Standard attention layer

    Args:
        query: A tensor with shape [batch, key_size]
        memories: A tensor with shape [batch, memory_size, key_size]
        bias: A tensor with shape [batch, memory_size]
        hidden_size: An integer
        cache: A dictionary of precomputed value
        reuse: A boolean value, whether to reuse the scope
        dtype: An optional instance of tf.DType
        scope: An optional string, the scope of this layer

    Return:
        A tensor with shape [batch, value_size] and a Tensor with
        shape [batch, memory_size]
    """

    with tf.variable_scope(scope or "attention", reuse=reuse,
                           values=[query, memories, bias], dtype=dtype):
        mem_shape = tf.shape(memories)
        key_size = memories.get_shape().as_list()[-1]

        if cache is None:
            k = tf.reshape(memories, [-1, key_size])
            k = linear(k, hidden_size, False, False, scope="k_transform")

            if query is None:
                return {"key": k}
        else:
            k = cache["key"]

        q = linear(query, hidden_size, False, False, scope="q_transform")
        k = tf.reshape(k, [mem_shape[0], mem_shape[1], hidden_size])

        hidden = tf.tanh(q[:, None, :] + k)
        hidden = tf.reshape(hidden, [-1, hidden_size])

        logits = linear(hidden, 1, False, False, scope="logits")
        logits = tf.reshape(logits, [-1, mem_shape[1]])

        if bias is not None:
            logits = logits + bias

        alpha = tf.nn.softmax(logits)

        outputs = {
            "value": tf.reduce_sum(alpha[:, :, None] * memories, axis=1),
            "weight": alpha
        }

    return outputs
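A usage sketch (TensorFlow 1.x); it relies on the linear function shown earlier on this page being defined in the same module. Passing query=None with cache=None instead returns only the precomputed keys, which can later be fed back through the cache argument.

import numpy as np
import tensorflow as tf

q = tf.placeholder(tf.float32, [None, 32])     # [batch, key_size]
m = tf.placeholder(tf.float32, [None, 7, 32])  # [batch, memory_size, key_size]
out = attention(q, m, None, hidden_size=64, scope="attn")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    res = sess.run(out, {q: np.ones((2, 32), np.float32),
                         m: np.ones((2, 7, 32), np.float32)})
    print(res["value"].shape)   # (2, 32)
    print(res["weight"].shape)  # (2, 7)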
Project: nengo_dl    Author: nengo
def align_func(output_shape, output_dtype):
    """Decorator that ensures the output of ``func`` is an
    :class:`~numpy:numpy.ndarray` with the given shape and dtype.

    Parameters
    ----------
    output_shape : tuple of int
        Desired shape for function output (must have the same size as actual
        function output)
    output_dtype : ``tf.DType`` or :class:`~numpy:numpy.dtype`
        Desired dtype of function output

    Raises
    ------
    :class:`~nengo:nengo.exceptions.SimulationError`
        If the function returns ``None`` or a non-finite value.
    """

    if isinstance(output_dtype, tf.DType):
        output_dtype = output_dtype.as_numpy_dtype

    def apply_align(func):
        def aligned_func(*args):
            output = func(*args)

            if output is None:
                raise SimulationError(
                    "Function %r returned None" %
                    function_name(func, sanitize=False))
            try:
                if not np.all(np.isfinite(output)):
                    raise SimulationError(
                        "Function %r returned invalid value %r" %
                        (function_name(func, sanitize=False), output))
            except (TypeError, ValueError):
                raise SimulationError(
                    "Function %r returned a value %r of invalid type %r" %
                    (function_name(func, sanitize=False), output,
                     type(output)))
            output = np.asarray(output, dtype=output_dtype)
            output = output.reshape(output_shape)
            return output

        return aligned_func

    return apply_align
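A usage sketch; the error paths (SimulationError, function_name) come from nengo/nengo_dl internals and are not exercised here:

import numpy as np
import tensorflow as tf

@align_func((2, 2), tf.float32)
def list_func():
    return [[1, 2], [3, 4]]  # a plain nested list, not an ndarray

out = list_func()
print(type(out), out.dtype, out.shape)  # <class 'numpy.ndarray'> float32 (2, 2)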
Project: THUMT    Author: thumt
def linear(inputs, output_size, bias, concat=True, dtype=None, scope=None):
    """
    Linear layer
    :param inputs: A Tensor or a list of Tensors with shape [batch, input_size]
    :param output_size: An integer specify the output size
    :param bias: a boolean value indicate whether to use bias term
    :param concat: a boolean value indicate whether to concatenate all inputs
    :param dtype: an instance of tf.DType, the default value is ``tf.float32''
    :param scope: the scope of this layer, the default value is ``linear''
    :returns: a Tensor with shape [batch, output_size]
    :raises RuntimeError: raises ``RuntimeError'' when input sizes do not
                          compatible with each other
    """

    with tf.variable_scope(scope, default_name="linear", values=[inputs]):
        if not isinstance(inputs, (list, tuple)):
            inputs = [inputs]

        input_size = [item.get_shape()[-1].value for item in inputs]

        if len(inputs) != len(input_size):
            raise RuntimeError("inputs and input_size unmatched!")

        output_shape = tf.concat([tf.shape(inputs[0])[:-1], [output_size]],
                                 axis=0)
        # Flatten to 2D
        inputs = [tf.reshape(inp, [-1, inp.shape[-1].value]) for inp in inputs]

        results = []

        if concat:
            input_size = sum(input_size)
            inputs = tf.concat(inputs, 1)

            shape = [input_size, output_size]
            matrix = tf.get_variable("matrix", shape, dtype=dtype)
            results.append(tf.matmul(inputs, matrix))
        else:
            for i in range(len(input_size)):
                shape = [input_size[i], output_size]
                name = "matrix_%d" % i
                matrix = tf.get_variable(name, shape, dtype=dtype)
                results.append(tf.matmul(inputs[i], matrix))

        output = tf.add_n(results)

        if bias:
            shape = [output_size]
            bias = tf.get_variable("bias", shape, dtype=dtype)
            output = tf.nn.bias_add(output, bias)

        output = tf.reshape(output, output_shape)

        return output
Project: THUMT    Author: thumt
def attention(query, memories, bias, hidden_size, cache=None, reuse=None,
              dtype=None, scope=None):
    """ Standard attention layer

    :param query: A tensor with shape [batch, key_size]
    :param memories: A tensor with shape [batch, memory_size, key_size]
    :param bias: A tensor with shape [batch, memory_size]
    :param hidden_size: An integer
    :param cache: A dictionary of precomputed value
    :param reuse: A boolean value, whether to reuse the scope
    :param dtype: An optional instance of tf.DType
    :param scope: An optional string, the scope of this layer
    :return: A tensor with shape [batch, value_size] and
        a Tensor with shape [batch, memory_size]
    """

    with tf.variable_scope(scope or "attention", reuse=reuse,
                           values=[query, memories, bias], dtype=dtype):
        mem_shape = tf.shape(memories)
        key_size = memories.get_shape().as_list()[-1]

        if cache is None:
            k = tf.reshape(memories, [-1, key_size])
            k = linear(k, hidden_size, False, False, scope="k_transform")

            if query is None:
                return {"key": k}
        else:
            k = cache["key"]

        q = linear(query, hidden_size, False, False, scope="q_transform")
        k = tf.reshape(k, [mem_shape[0], mem_shape[1], hidden_size])

        hidden = tf.tanh(q[:, None, :] + k)
        hidden = tf.reshape(hidden, [-1, hidden_size])

        # Shape: [batch * mem_size, 1], reshaped below to [batch, mem_size]
        logits = linear(hidden, 1, False, False, scope="logits")
        logits = tf.reshape(logits, [-1, mem_shape[1]])

        if bias is not None:
            logits = logits + bias

        alpha = tf.nn.softmax(logits)

        outputs = {
            "value": tf.reduce_sum(alpha[:, :, None] * memories, axis=1),
            "weight": alpha
        }

    return outputs
Project: THUMT    Author: thumt
def additive_attention(queries, keys, values, bias, hidden_size, concat=False,
                       keep_prob=None, dtype=None, scope=None):
    """ Additive attention mechanism. This layer is implemented using a
        one layer feed forward neural network

    :param queries: A tensor with shape [batch, heads, length_q, depth_k]
    :param keys: A tensor with shape [batch, heads, length_kv, depth_k]
    :param values: A tensor with shape [batch, heads, length_kv, depth_v]
    :param bias: A tensor
    :param hidden_size: An integer
    :param concat: A boolean value. If ``concat'' is set to True, then
        the computation of attention mechanism is following $tanh(W[q, k])$.
        When ``concat'' is set to False, the computation is following
        $tanh(Wq + Vk)$
    :param keep_prob: a scalar in [0, 1]
    :param dtype: An optional instance of tf.DType
    :param scope: An optional string, the scope of this layer

    :returns: A dict with the following keys:
        weights: A tensor with shape [batch, length_q]
        outputs: A tensor with shape [batch, length_q, depth_v]
    """

    with tf.variable_scope(scope, default_name="additive_attention",
                           values=[queries, keys, values, bias], dtype=dtype):
        length_q = tf.shape(queries)[2]
        length_kv = tf.shape(keys)[2]
        q = tf.tile(tf.expand_dims(queries, 3), [1, 1, 1, length_kv, 1])
        k = tf.tile(tf.expand_dims(keys, 2), [1, 1, length_q, 1, 1])

        if concat:
            combined = tf.tanh(linear(tf.concat([q, k], axis=-1), hidden_size,
                                      True, True, scope="qk_transform"))
        else:
            # Transform the tiled q and k so the shapes broadcast; linear
            # takes a `scope` keyword, not `name`.
            q = linear(q, hidden_size, True, True, scope="q_transform")
            k = linear(k, hidden_size, True, True, scope="key_transform")
            combined = tf.tanh(q + k)

        # shape: [batch, heads, length_q, length_kv]
        logits = tf.squeeze(linear(combined, 1, True, True, scope="logits"),
                            axis=-1)

        if bias is not None:
            logits += bias

        weights = tf.nn.softmax(logits, name="attention_weights")

        if keep_prob is not None and keep_prob < 1.0:
            weights = tf.nn.dropout(weights, keep_prob)

        outputs = tf.matmul(weights, values)

        return {"weights": weights, "outputs": outputs}