Python tensorflow module: int16 code examples

We extracted the following 32 code examples from open-source Python projects to illustrate how tensorflow.int16 is used.
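As a quick orientation before the project snippets (this sketch is my own and not taken from any of the repositories listed below): tf.int16 is a DType object rather than a function, so it is passed wherever an op expects a dtype argument, and it exposes its numeric range and NumPy equivalent.

import tensorflow as tf

# tf.int16 is a DType object: pass it to any op that takes a dtype argument.
x = tf.constant([1, 2, 3], dtype=tf.int16)   # 16-bit integer tensor
y = tf.cast(x, tf.float32)                   # widen before float arithmetic

print(tf.int16.min, tf.int16.max)            # -32768 32767
print(tf.int16.as_numpy_dtype)               # <class 'numpy.int16'>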

Project: jack    Author: uclmr
def create_torch_variable(self, value, gpu=False):
        """Convenience method that produces a tensor given the value of the defined type.

        Returns: a torch tensor of the same type.
        """
        if isinstance(value, torch.autograd.Variable):
            if gpu:
                value = value.cuda()
            return value
        if not torch.is_tensor(value):
            if not isinstance(value, np.ndarray):
                value = np.array(value, dtype=self.dtype.as_numpy_dtype)
            else:
                value = value.astype(self.dtype.as_numpy_dtype)
            if value.size == 0:
                return value
            allowed = [tf.int16, tf.int32, tf.int64, tf.float16, tf.float32, tf.float64, tf.int8]
            if self.dtype in allowed:
                value = torch.autograd.Variable(torch.from_numpy(value))
        else:
            value = torch.autograd.Variable(value)
        if gpu and isinstance(value, torch.autograd.Variable):
            value = value.cuda()
        return value
Project: keras    Author: GeekLiB
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    if dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
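Several of the projects below repeat this same string-to-dtype chain. As an aside (a sketch of my own, not code from the keras repository), the mapping can be written as a lookup table, and TensorFlow itself already provides tf.as_dtype('int16'), which returns tf.int16 directly.

import tensorflow as tf

# Lookup-table equivalent of the chained if/elif above (sketch only).
_STR_TO_DTYPE = {
    'float16': tf.float16, 'float32': tf.float32, 'float64': tf.float64,
    'int16': tf.int16, 'int32': tf.int32, 'int64': tf.int64,
    'uint8': tf.uint8, 'uint16': tf.uint16,
}

def convert_string_dtype(dtype):
    try:
        return _STR_TO_DTYPE[dtype]
    except KeyError:
        raise ValueError('Unsupported dtype:', dtype)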
Project: zhusuan    Author: thu-ml
def assert_same_float_and_int_dtype(tensors_with_name, dtype=None):
    """
    Assert that all tensors in `tensors_with_name` share the same dtype, which
    must be a floating-point or integer type.

    :param tensors_with_name: A list of (tensor, tensor_name) pairs.
    :param dtype: Expected dtype. If `None`, it is inferred from the tensors.
    :return: The common dtype of the tensors.
    """

    available_types = [tf.float16, tf.float32, tf.float64,
                       tf.int16, tf.int32, tf.int64]
    if dtype is None:
        return assert_same_specific_dtype(tensors_with_name, available_types)
    elif dtype in available_types:
        return assert_same_dtype(tensors_with_name, dtype)
    else:
        raise TypeError("The argument 'dtype' must be in %s" % available_types)
Project: IDNNs    Author: ravidziv
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    if dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
Project: dahoam2017    Author: KarimJedda
def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      features={
          'image_raw': tf.FixedLenFeature([], tf.string),
          'label_raw': tf.FixedLenFeature([], tf.string),
      })
  image = tf.decode_raw(features['image_raw'], tf.int16)
  image.set_shape([IMAGE_HEIGHT * IMAGE_WIDTH])
  image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
  reshape_image = tf.reshape(image, [IMAGE_HEIGHT, IMAGE_WIDTH, 1])
  label = tf.decode_raw(features['label_raw'], tf.uint8)
  label.set_shape([CHARS_NUM * CLASSES_NUM])
  reshape_label = tf.reshape(label, [CHARS_NUM, CLASSES_NUM])
  return tf.cast(reshape_image, tf.float32), tf.cast(reshape_label, tf.float32)
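For context, a writer-side sketch of how an int16 image could be serialized so that the tf.decode_raw(..., tf.int16) call above can read it back; this is an assumption on my part, not code from the dahoam2017 project, and the shape constants are placeholder values.

import numpy as np
import tensorflow as tf

IMAGE_HEIGHT, IMAGE_WIDTH = 48, 128   # placeholder values, not the project's
CHARS_NUM, CLASSES_NUM = 4, 10        # placeholder values, not the project's

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

# Placeholder data; real images and labels would come from the captcha generator.
image = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH), dtype=np.int16)
label = np.zeros(CHARS_NUM * CLASSES_NUM, dtype=np.uint8)

example = tf.train.Example(features=tf.train.Features(feature={
    'image_raw': _bytes_feature(image.tobytes()),
    'label_raw': _bytes_feature(label.tobytes()),
}))
with tf.python_io.TFRecordWriter('captcha.tfrecords') as writer:
    writer.write(example.SerializeToString())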
Project: deep-learning-keras-projects    Author: jasmeetsb
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    if dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
Project: tefla    Author: openAGI
def diet_adam_optimizer_params():
    """Default hyperparameters for a DietAdamOptimizer.

    Returns:
      a hyperparameters object.
    """
    return tf.contrib.training.HParams(
        quantize=int(True),  # use 16-bit fixed-point
        quantization_scale=10.0 / tf.int16.max,
        optimizer="DietAdam",
        learning_rate=1.0,
        learning_rate_warmup_steps=2000,
        learning_rate_decay_scheme="noam",  # "noam" or "none"
        epsilon=1e-10,
        beta1=0.0,  # we can save memory if beta1=0
        beta2=0.98,
        factored_second_moment_accumulator=int(True),  # this saves memory
    )
Project: tefla    Author: openAGI
def _quantize(x, params, randomize=True):
    """Quantize x according to params, optionally randomizing the rounding."""
    if not params.quantize:
        return x

    if not randomize:
        return tf.bitcast(
            tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

    abs_x = tf.abs(x)
    sign_x = tf.sign(x)
    y = abs_x / params.quantization_scale
    y = tf.floor(y + tf.random_uniform(tf.shape(x)))
    y = tf.minimum(y, tf.int16.max) * sign_x
    q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
    return q
Project: keras-customized    Author: ambrite
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    if dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
Project: neuroimage-tensorflow    Author: corticometrics
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,features={
        'image_raw': tf.FixedLenFeature([], tf.string),
        'label_raw': tf.FixedLenFeature([], tf.string)})
    image  = tf.cast(tf.decode_raw(features['image_raw'], tf.int16), tf.float32)
    labels = tf.decode_raw(features['label_raw'], tf.int16)

    #PW 2017/03/03: Zero-center data here?
    image.set_shape([IMG_DIM*IMG_DIM*IMG_DIM])
    image  = tf.reshape(image, [IMG_DIM,IMG_DIM,IMG_DIM,1])

    labels.set_shape([IMG_DIM*IMG_DIM*IMG_DIM])
    labels  = tf.reshape(labels, [IMG_DIM,IMG_DIM,IMG_DIM])

    # Dimensions (X, Y, Z, channels)
    return image, labels
Project: keras    Author: NVIDIA
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    if dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
Project: keras_superpixel_pooling    Author: parag2489
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    if dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
Project: tensor2tensor    Author: tensorflow
def diet_adam_optimizer_params():
  """Default hyperparameters for a DietAdamOptimizer.

  Returns:
    a hyperparameters object.
  """
  return tf.contrib.training.HParams(
      quantize=True,  # use 16-bit fixed-point
      quantization_scale=10.0 / tf.int16.max,
      optimizer="DietAdam",
      learning_rate=1.0,
      learning_rate_warmup_steps=2000,
      learning_rate_decay_scheme="noam",  # "noam" or "none"
      epsilon=1e-10,
      beta1=0.0,  # we can save memory if beta1=0
      beta2=0.98,
      factored_second_moment_accumulator=True,  # this saves memory
  )
Project: tensor2tensor    Author: tensorflow
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
Project: InnerOuterRNN    Author: Chemoinformatics
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    if dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
Project: captcha_recognize    Author: PatrickLib
def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      features={
          'image_raw': tf.FixedLenFeature([], tf.string),
          'label_raw': tf.FixedLenFeature([], tf.string),
      })
  image = tf.decode_raw(features['image_raw'], tf.int16)
  image.set_shape([IMAGE_HEIGHT * IMAGE_WIDTH])
  image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
  reshape_image = tf.reshape(image, [IMAGE_HEIGHT, IMAGE_WIDTH, 1])
  label = tf.decode_raw(features['label_raw'], tf.uint8)
  label.set_shape([CHARS_NUM * CLASSES_NUM])
  reshape_label = tf.reshape(label, [CHARS_NUM, CLASSES_NUM])
  return tf.cast(reshape_image, tf.float32), tf.cast(reshape_label, tf.float32)
Project: transform    Author: tensorflow
def testScaleToZScore_int16(self):
    self._testScaleToZScore(input_dtype=tf.int16, output_dtype=tf.float32)
Project: transform    Author: tensorflow
def testNumericAnalyzersWithScalarInputs_int16(self):
    self._testNumericAnalyzersWithScalarInputs(
        input_dtype=tf.int16,
        output_dtypes={
            'min': tf.int16,
            'max': tf.int16,
            'sum': tf.int16,
            'size': tf.int16,
            'mean': tf.float32,
            'var': tf.float32
        }
    )
Project: tefla    Author: openAGI
def _dequantize(q, params):
    """Dequantize q according to params."""
    if not params.quantize:
        return q
    return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale
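A minimal roundtrip sketch showing how the _quantize and _dequantize helpers above fit together; the wiring and the Params stand-in are my own (the original code uses tf.contrib.training.HParams with the same two fields).

import tensorflow as tf

class Params(object):
    quantize = True
    quantization_scale = 10.0 / tf.int16.max   # values up to about 10 fit in int16

x = tf.random_normal([4, 4]) * 2.0          # stays well inside the ±10 range
q = _quantize(x, Params, randomize=False)   # int16 bit pattern stored as a float16 tensor
x_hat = _dequantize(q, Params)              # float32 again, within one scale step of x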
Project: lsdc    Author: febert
def test_input_int16(self):
    self._assert_dtype(
        np.int16, tf.int16, np.matrix([[1, 2], [3, 4]], dtype=np.int16))
Project: lsdc    Author: febert
def test_input_int16(self):
    self._assert_dtype(
        np.int16, tf.int16, np.matrix([[1, 2], [3, 4]], dtype=np.int16))
Project: single-image-depth-estimation    Author: liuhyCV
def train_batch_inputs(dataset_csv_file_path, batch_size):

    with tf.name_scope('batch_processing'):

        if not os.path.isfile(dataset_csv_file_path):
            raise ValueError('No data files found for this dataset')

        filename_queue = tf.train.string_input_producer([dataset_csv_file_path], shuffle=True)
        reader = tf.TextLineReader()
        _, serialized_example = reader.read(filename_queue)
        filename, depth_filename = tf.decode_csv(serialized_example, [["path"], ["annotation"]])

        # input
        png = tf.read_file(filename)
        image = tf.image.decode_png(png, channels=3)
        image = tf.cast(image, tf.float32)
        # target
        depth_png = tf.read_file(depth_filename)
        depth = tf.image.decode_png(depth_png, dtype=tf.uint16, channels=1)
        depth = tf.cast(depth, dtype=tf.int16)

        # resize
        image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
        depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
        invalid_depth = tf.sign(depth)

        # generate batch
        images, depths, invalid_depths = tf.train.batch(
            [image, depth, invalid_depth],
            batch_size = batch_size,
            num_threads = 4,
            capacity = 50 + 3 * batch_size
        )
        return images, depths, invalid_depths
Project: single-image-depth-estimation    Author: liuhyCV
def eval_batch_inputs(dataset_csv_file_path, batch_size):

    with tf.name_scope('eval_batch_processing'):

        if not os.path.isfile(dataset_csv_file_path):
            raise ValueError('No data files found for this dataset')

        filename_queue = tf.train.string_input_producer([dataset_csv_file_path], shuffle=True)
        reader = tf.TextLineReader()
        _, serialized_example = reader.read(filename_queue)
        filename, depth_filename = tf.decode_csv(serialized_example, [["path"], ["annotation"]])

        # input
        png = tf.read_file(filename)
        image = tf.image.decode_png(png, channels=3)
        image = tf.cast(image, tf.float32)
        # target
        depth_png = tf.read_file(depth_filename)
        depth = tf.image.decode_png(depth_png, dtype=tf.uint16, channels=1)
        depth = tf.cast(depth, dtype=tf.int16)

        # resize
        image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
        depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
        invalid_depth = tf.sign(depth)

        # generate batch
        images, depths, invalid_depths = tf.train.batch(
            [image, depth, invalid_depth],
            batch_size = batch_size,
            num_threads = 4,
            capacity = 50 + 3 * batch_size
        )
        return images, depths, invalid_depths
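A usage sketch for the two input pipelines above (TF 1.x queue runners); the CSV path and batch size are placeholders rather than values from the repository.

import tensorflow as tf

images, depths, invalid_depths = train_batch_inputs('train.csv', batch_size=8)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    img_batch, depth_batch, _ = sess.run([images, depths, invalid_depths])
    coord.request_stop()
    coord.join(threads)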
Project: imperative    Author: yaroslavvb
def testConvertBetweenInteger(self):
    try:
      # Make sure converting between integer types scales appropriately
      with self.test_session():
        self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
        self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
        # itensor is tf.int32, but attr "T" is set to tf.int16
    except:
      import pdb;
      pdb.post_mortem()
Project: imperative    Author: yaroslavvb
def testConvertBetweenInteger(self):
    try:
      # Make sure converting between integer types scales appropriately
      with self.test_session():
        self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
        self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
        # itensor is tf.int32, but attr "T" is set to tf.int16
    except:
      import pdb;
      pdb.post_mortem()
Project: imperative    Author: yaroslavvb
def testConvertBetweenInteger(self):
    try:
      # Make sure converting between integer types scales appropriately
      with self.test_session():
        self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
        self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
        # itensor is tf.int32, but attr "T" is set to tf.int16
    except:
      import pdb;
      pdb.post_mortem()
Project: tensor2tensor    Author: tensorflow
def _dequantize(q, params):
  """Dequantize q according to params."""
  if not params.quantize:
    return q
  return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale
Project: woipv    Author: Panaetius
def __read(self, filename_queue):
        class CocoRecord(object):
            image_raw = []
            bboxes = []
            categories = []
            image_id = -1
            pass

        result = CocoRecord()

        reader = tf.TFRecordReader()

        _, value = reader.read(filename_queue)

        features = tf.parse_single_example(
            value,
            features={
                'labels': tf.FixedLenFeature([1], tf.string),
                'image_raw': tf.FixedLenFeature([1], tf.string),
                'image_width': tf.FixedLenFeature([1], tf.int64),
                'image_height': tf.FixedLenFeature([1], tf.int64)
            })

        result.labels = tf.decode_raw(features['labels'], tf.int16)
        result.image_raw = tf.decode_raw(features['image_raw'], tf.uint8)
        result.width = features['image_width']
        result.height = features['image_height']

        return result
Project: woipv    Author: Panaetius
def __unpool(self, updates, mask, ksize=[1, 2, 2, 1], output_shape=None, feature_count=None, name=''):
        with tf.variable_scope(name):
            mask = tf.cast(mask, tf.int32)
            input_shape = tf.shape(updates, out_type=tf.int32)
            # calculate the new shape

            if feature_count is None:
                feature_count = input_shape[3]

            if output_shape is None:
                output_shape = (1, input_shape[1] * ksize[1], input_shape[2] * ksize[2], feature_count)

            output_shape = tf.cast(output_shape, tf.int32)

            # calculate indices for batch, height, width and feature maps
            one_like_mask = tf.cast(tf.ones_like(mask, dtype=tf.int16), tf.int32)
            batch_shape = tf.concat([[input_shape[0]], [1], [1], [1]], 0)
            batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int32), shape=batch_shape)
            b = one_like_mask * batch_range
            y = tf.floordiv(mask, output_shape[2] * output_shape[3])
            x = tf.mod(tf.floordiv(mask, output_shape[3]), output_shape[2]) #mask % (output_shape[2] * output_shape[3]) // output_shape[3]
            feature_range = tf.range(output_shape[3], dtype=tf.int32)
            f = one_like_mask * feature_range
            # transpose indices & reshape update values to one dimension
            updates_size = tf.size(updates)
            indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
            values = tf.reshape(updates, [updates_size])
            ret = tf.scatter_nd(indices, values, output_shape)
            return ret
Project: woipv    Author: Panaetius
def __pad_to_size(self, input, target_shape):
        input_shape = tf.shape(input)
        difference = target_shape - input_shape
        offset = tf.cast(tf.zeros_like(difference, dtype=tf.int16), tf.int32)
        padding = tf.concat([tf.expand_dims(difference, axis=1), tf.expand_dims(offset, axis=1)], axis=1)

        return tf.pad(input, padding)
Project: zhusuan    Author: thu-ml
def test_dtype_1parameter_discrete(test_class, Distribution):
    def _test_sample_dtype(input_, result_dtype, dtype):
        distribution = Distribution(input_, dtype=dtype)
        samples = distribution.sample(2)
        test_class.assertEqual(distribution.dtype, result_dtype)
        test_class.assertEqual(samples.dtype, result_dtype)

    for input_ in [[1.], [[2., 3.], [4., 5.]]]:
        _test_sample_dtype(input_, tf.int32, None)
        _test_sample_dtype(input_, tf.int16, tf.int16)
        _test_sample_dtype(input_, tf.int32, tf.int32)
        _test_sample_dtype(input_, tf.float32, tf.float32)
        _test_sample_dtype(input_, tf.float64, tf.float64)

    def _test_parameter_dtype_raise(param_dtype):
        param = tf.placeholder(param_dtype, [1])
        with test_class.assertRaises(TypeError):
            Distribution(param)

    _test_parameter_dtype_raise(tf.int32)
    _test_parameter_dtype_raise(tf.int64)

    # test dtype for log_prob and prob
    def _test_log_prob_dtype(param_dtype, given_dtype):
        param = tf.placeholder(param_dtype, [1])
        distribution = Distribution(param, dtype=given_dtype)
        test_class.assertEqual(distribution.param_dtype, param_dtype)

        # test for tensor
        given = tf.placeholder(given_dtype, None)
        prob = distribution.prob(given)
        log_prob = distribution.log_prob(given)

        test_class.assertEqual(prob.dtype, param_dtype)
        test_class.assertEqual(log_prob.dtype, param_dtype)

        # test for numpy
        given_np = np.array([1], given_dtype.as_numpy_dtype)
        prob_np = distribution.prob(given_np)
        log_prob_np = distribution.log_prob(given_np)

        test_class.assertEqual(prob_np.dtype, param_dtype)
        test_class.assertEqual(log_prob_np.dtype, param_dtype)

    _test_log_prob_dtype(tf.float16, tf.int32)
    _test_log_prob_dtype(tf.float32, tf.int32)
    _test_log_prob_dtype(tf.float64, tf.int64)
    _test_log_prob_dtype(tf.float32, tf.float32)
    _test_log_prob_dtype(tf.float32, tf.float64)
Project: pruning_with_tensorflow    Author: ex4sperans
def _build_network(self,
                       inputs: tf.Tensor,
                       sparse_layers: list,
                       activation_fn: callable) -> tf.Tensor:

        with tf.variable_scope('network'):

            net = inputs

            self.weight_tensors = []

            bias_initializer = tf.constant_initializer(0.1)

            for i, layer in enumerate(sparse_layers):

                with tf.variable_scope('layer_{layer}'.format(layer=i+1)):

                    # create variables based on sparse values                    
                    with tf.variable_scope('sparse'):

                        indices = tf.get_variable(name='indices',
                                                  initializer=layer.indices,
                                                  dtype=tf.int16)

                        values = tf.get_variable(name='values',
                                                 initializer=layer.values,
                                                 dtype=tf.float32)

                        dense_shape = tf.get_variable(name='dense_shape',
                                                      initializer=layer.dense_shape,
                                                      dtype=tf.int64)

                    # create a weight tensor based on the created variables
                    weights = tf.sparse_to_dense(tf.cast(indices, tf.int64),
                                                 dense_shape,
                                                 values)

                    self.weight_tensors.append(weights)

                    name = 'bias'
                    bias = tf.get_variable(name=name,
                                           initializer=layer.bias)

                    net = tf.matmul(net, weights) + bias

                    if i < len(sparse_layers) - 1:
                        net = activation_fn(net)

            return net