Python tensorflow module: int64() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.int64().
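
Before the project examples, here is a minimal sketch of the two recurring uses of tf.int64 on this page: casting label tensors and declaring int64 features in a tf.Example parsing spec. It assumes TensorFlow 1.x, which the snippets below target; the tensor values are placeholders.

import tensorflow as tf

# Cast int32 labels to int64, as the loss functions below do before
# calling tf.nn.sparse_softmax_cross_entropy_with_logits.
labels = tf.cast(tf.constant([0, 1, 2], dtype=tf.int32), tf.int64)

# Declare an int64 scalar feature for tf.parse_example / tf.parse_single_example.
feature_spec = {'label': tf.FixedLenFeature(shape=[], dtype=tf.int64)}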

Project: transform    Author: tensorflow    | Project source | File source
def test_sequence_feature_not_supported(self):
    feature_spec = {
        # FixedLenSequenceFeatures
        'fixed_seq_bool':
            tf.FixedLenSequenceFeature(shape=[10], dtype=tf.bool),
        'fixed_seq_bool_allow_missing':
            tf.FixedLenSequenceFeature(
                shape=[5], dtype=tf.bool, allow_missing=True),
        'fixed_seq_int':
            tf.FixedLenSequenceFeature(shape=[5], dtype=tf.int64),
        'fixed_seq_float':
            tf.FixedLenSequenceFeature(shape=[5], dtype=tf.float32),
        'fixed_seq_string':
            tf.FixedLenSequenceFeature(shape=[5], dtype=tf.string),
    }

    with self.assertRaisesRegexp(ValueError,
                                 'DatasetSchema does not support '
                                 'FixedLenSequenceFeature yet.'):
      sch.from_feature_spec(feature_spec)
Project: ml    Author: hohoins    | Project source | File source
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: vae-npvc    Author: JeremyCCHsu    | Project source | File source
def read_whole_features(file_pattern, num_epochs=1):
    '''
    Return
        `feature`: `dict` whose keys are `sp`, `ap`, `f0`, `en`, `speaker`, `filename`
    '''
    files = tf.gfile.Glob(file_pattern)
    print('{} files found'.format(len(files)))
    filename_queue = tf.train.string_input_producer(files, num_epochs=num_epochs)
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    print("Processing {}".format(key), flush=True)
    value = tf.decode_raw(value, tf.float32)
    value = tf.reshape(value, [-1, FEAT_DIM])
    return {
        'sp': value[:, :SP_DIM],
        'ap': value[:, SP_DIM : 2*SP_DIM],
        'f0': value[:, SP_DIM * 2],
        'en': value[:, SP_DIM * 2 + 1],
        'speaker': tf.cast(value[:, SP_DIM * 2 + 2], tf.int64),
        'filename': key,
    }
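
A hypothetical driver for read_whole_features, for context (the file pattern below is a placeholder; FEAT_DIM and SP_DIM are module-level constants in the original project). Because string_input_producer is given num_epochs, it creates a local counter variable, so local variables must be initialized as well:

import tensorflow as tf

features = read_whole_features('./data/*.bin')  # placeholder pattern
with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            feat = sess.run(features)  # one file's feature dict per call
    except tf.errors.OutOfRangeError:
        pass  # queue exhausted after num_epochs passes over the files
    finally:
        coord.request_stop()
        coord.join(threads)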
Project: vae-npvc    Author: JeremyCCHsu    | Project source | File source
def mnist_batcher_in_tanh_vector(
    batch_size,
    capacity=256,
    min_after_dequeue=128,
    ):
    (x, y), (_, _) = keras.datasets.mnist.load_data()
    x = tf.constant(x)
    x = tf.cast(x, tf.float32)
    x = keras.layers.Flatten()(x) / 127.5 - 1.
    y = tf.cast(y, tf.int64)

    return tf.train.shuffle_batch(
        [x, y],
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=min_after_dequeue,
        enqueue_many=True
    )
Project: vae-npvc    Author: JeremyCCHsu    | Project source | File source
def _validate(self, machine, n=10):
        N = n * n

        # same row same z
        z = tf.random_normal(shape=[n, self.arch['z_dim']])
        z = tf.tile(z, [1, n])
        z = tf.reshape(z, [N, -1])
        z = tf.Variable(z, trainable=False, dtype=tf.float32)       

        # same column same y 
        y = tf.range(0, 10, 1, dtype=tf.int64)
        y = tf.reshape(y, [-1, 1])
        y = tf.tile(y, [n, 1])

        Xh = machine.generate(z, y) # 100, 64, 64, 3
        # Xh = gray2jet(Xh)
        # Xh = make_png_thumbnail(Xh, n)
        Xh = make_png_jet_thumbnail(Xh, n)
        return Xh
Project: vae-npvc    Author: JeremyCCHsu    | Project source | File source
def _validate(self, machine, n=10):
        N = n * n
        z = np.random.normal(0., 1., size=[n, self.arch['z_dim']])
        z = np.concatenate([z] * n, axis=1)
        z = np.reshape(z, [N, -1]).astype(np.float32)  # consecutive rows
        y = np.asarray(
            [[5,   0,  0 ],
             [9,   0,  0 ],
             [12,  0,  0 ],
             [17,  0,  0 ],
             [19,  0,  0 ],
             [161, 0,  0 ],
             [170, 0,  0 ],
             [170, 16, 0 ],
             [161, 9,  4 ],
             [19,  24, 50]],
            dtype=np.int64)
        y = np.concatenate([y] * n, axis=0)
        Z = tf.constant(z)
        Y = tf.constant(y)
        Xh = machine.generate(Z, Y) # 100, 64, 64, 3
        Xh = make_png_thumbnail(Xh, n)
        return Xh
Project: vae-npvc    Author: JeremyCCHsu    | Project source | File source
def _validate(self, machine, n=10):
        N = n * n

        # same row same z
        z = tf.random_normal(shape=[n, self.arch['z_dim']])
        z = tf.tile(z, [1, n])
        z = tf.reshape(z, [N, -1])
        z = tf.Variable(z, trainable=False, dtype=tf.float32)       

        # same column same y 
        y = tf.range(0, 10, 1, dtype=tf.int64)
        y = tf.reshape(y, [-1,])
        y = tf.tile(y, [n,])

        Xh = machine.generate(z, y) # 100, 64, 64, 3
        Xh = make_png_thumbnail(Xh, n)
        return Xh
Project: Tensormodels    Author: asheshjain399    | Project source | File source
def global_step(device=''):
  """Returns the global step variable.

  Args:
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.

  Returns:
    the tensor representing the global step variable.
  """
  global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
  if global_step_ref:
    return global_step_ref[0]
  else:
    collections = [
        VARIABLES_TO_RESTORE,
        tf.GraphKeys.VARIABLES,
        tf.GraphKeys.GLOBAL_STEP,
    ]
    # Get the device for the variable.
    with tf.device(variable_device(device, 'global_step')):
      return tf.get_variable('global_step', shape=[], dtype=tf.int64,
                             initializer=tf.zeros_initializer,
                             trainable=False, collections=collections)
Project: 3D_CNN_jonas    Author: 2015ZxEE    | Project source | File source
def loss(logits, label_batch):
    """
    Add L2Loss to all the trainable variables.
    Add summary for "Loss" and "Loss/avg".
    Args:
        logits      -> logits from inference()
        label_batch -> 1D tensor of [batch_size]
    Rtns:
        total_loss  -> float tensor
    """
    # Calculate the average cross entropy loss across the batch.
    label_batch        = tf.cast(label_batch,tf.int64)
    cross_entropy      = tf.nn.sparse_softmax_cross_entropy_with_logits(
                                labels=label_batch, logits=logits,
                                name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses',cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: tensorflow-srgan    Author: olgaliak    | Project source | File source
def create_optimizers(gene_loss, gene_var_list,
                      disc_loss, disc_var_list):    
    # Note: global_step is incremented automatically, because it is passed as
    # the global_step argument to the minimize() calls below.
    global_step    = tf.Variable(0, dtype=tf.int64,   trainable=False, name='global_step')
    learning_rate  = tf.placeholder(dtype=tf.float32, name='learning_rate')

    gene_opti = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                       beta1=FLAGS.learning_beta1,
                                       name='gene_optimizer')
    disc_opti = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                       beta1=FLAGS.learning_beta1,
                                       name='disc_optimizer')

    gene_minimize = gene_opti.minimize(gene_loss, var_list=gene_var_list, name='gene_loss_minimize', global_step=global_step)

    disc_minimize     = disc_opti.minimize(disc_loss, var_list=disc_var_list, name='disc_loss_minimize', global_step=global_step)

    return (global_step, learning_rate, gene_minimize, disc_minimize)
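
Because global_step is passed to both minimize() calls, running the two training ops together advances the counter by two per iteration. A hedged usage sketch with dummy scalar losses (the real losses and variable lists come from the generator and discriminator graphs, and FLAGS.learning_beta1 must be defined, as in the original project):

import tensorflow as tf

g = tf.Variable(1.0, name='g')
d = tf.Variable(1.0, name='d')
global_step, lr, gene_min, disc_min = create_optimizers(
    gene_loss=tf.square(g), gene_var_list=[g],
    disc_loss=tf.square(d), disc_var_list=[d])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run([gene_min, disc_min], feed_dict={lr: 1e-4})
    print(sess.run(global_step))  # 2: each optimizer incremented it once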
Project: cloudml-samples    Author: GoogleCloudPlatform    | Project source | File source
def make_input_schema(mode=tf.contrib.learn.ModeKeys.TRAIN):
  """Input schema definition.

  Args:
    mode: tf.contrib.learn.ModeKeys specifying if the schema is being used for
      train/eval or prediction.
  Returns:
    A `Schema` object.
  """
  result = ({} if mode == tf.contrib.learn.ModeKeys.INFER
            else {'clicked': tf.FixedLenFeature(shape=[], dtype=tf.int64)})
  for name in INTEGER_COLUMN_NAMES:
    result[name] = tf.FixedLenFeature(
        shape=[], dtype=tf.int64, default_value=-1)
  for name in CATEGORICAL_COLUMN_NAMES:
    result[name] = tf.FixedLenFeature(shape=[], dtype=tf.string,
                                      default_value='')

  return dataset_schema.from_feature_spec(result)
Project: cloudml-samples    Author: GoogleCloudPlatform    | Project source | File source
def example_serving_input_fn(default_batch_size=None):
  """Build the serving inputs.

  Args:
    default_batch_size (int): Batch size for the tf.placeholder shape
  """
  feature_spec = {}
  for feat in CONTINUOUS_COLS:
    feature_spec[feat] = tf.FixedLenFeature(shape=[], dtype=tf.int64)

  for feat, _ in CATEGORICAL_COLS:
    feature_spec[feat] = tf.FixedLenFeature(shape=[], dtype=tf.string)

  example_bytestring = tf.placeholder(
      shape=[default_batch_size],
      dtype=tf.string,
  )
  features = tf.parse_example(example_bytestring, feature_spec)
  return features, {'example': example_bytestring}
Project: cloudml-samples    Author: GoogleCloudPlatform    | Project source | File source
def make_input_schema(mode=tf.contrib.learn.ModeKeys.TRAIN):
  """Input schema definition.

  Args:
    mode: tf.contrib.learn.ModeKeys specifying if the schema is being used for
      train/eval or prediction.
  Returns:
    A `Schema` object.
  """
  result = ({} if mode == tf.contrib.learn.ModeKeys.INFER else {
      'score': tf.FixedLenFeature(shape=[], dtype=tf.float32)
  })
  result.update({
      'subreddit': tf.FixedLenFeature(shape=[], dtype=tf.string),
      'author': tf.FixedLenFeature(shape=[], dtype=tf.string),
      'comment_body': tf.FixedLenFeature(shape=[], dtype=tf.string,
                                         default_value=''),
      'comment_parent_body': tf.FixedLenFeature(shape=[], dtype=tf.string,
                                                default_value=''),
      'toplevel': tf.FixedLenFeature(shape=[], dtype=tf.int64),
  })
  return dataset_schema.from_feature_spec(result)
Project: deep-time-reading    Author: felixduvallet    | Project source | File source
def _loss_shared(logits, labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".
    Args:
      logits: Logits from inference().
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
              of shape [batch_size]

    Returns:
      Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: jack    Author: uclmr    | Project source | File source
def create_torch_variable(self, value, gpu=False):
        """Convenience method that produces a tensor given the value of the defined type.

        Returns: a torch tensor of same type.
        """
        if isinstance(value, torch.autograd.Variable):
            if gpu:
                value = value.cuda()
            return value
        if not torch.is_tensor(value):
            if not isinstance(value, np.ndarray):
                value = np.array(value, dtype=self.dtype.as_numpy_dtype)
            else:
                value = value.astype(self.dtype.as_numpy_dtype)
            if value.size == 0:
                return value
            allowed = [tf.int16, tf.int32, tf.int64, tf.float16, tf.float32, tf.float64, tf.int8]
            if self.dtype in allowed:
                value = torch.autograd.Variable(torch.from_numpy(value))
        else:
            value = torch.autograd.Variable(value)
        if gpu and isinstance(value, torch.autograd.Variable):
            value = value.cuda()
        return value
Project: AssociativeRetrieval    Author: jxwufan    | Project source | File source
def read(self, shuffle=True, num_epochs=None):
    with tf.name_scope('input'):
      reader = tf.TFRecordReader()
      filename_queue = tf.train.string_input_producer([self.filename], num_epochs=num_epochs)
      _, serialized_input = reader.read(filename_queue)
      inputs = tf.parse_single_example(serialized_input,
                                       features={
                                       'inputs_seq': tf.FixedLenFeature([self.seq_len * 2 + 3], tf.int64),
                                       'output': tf.FixedLenFeature([1], tf.int64)
                                       })
      inputs_seq = inputs['inputs_seq']
      output = inputs['output']
      min_after_dequeue = 100
      if shuffle:
        inputs_seqs, outputs = tf.train.shuffle_batch([inputs_seq, output], batch_size=self.batch_size, num_threads=2, capacity=min_after_dequeue + 3 * self.batch_size, min_after_dequeue=min_after_dequeue)
      else:
        inputs_seqs, outputs = tf.train.batch([inputs_seq, output], batch_size=self.batch_size)
      return inputs_seqs, outputs
Project: keras    Author: GeekLiB    | Project source | File source
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
Project: keras    Author: GeekLiB    | Project source | File source
def sparse_categorical_crossentropy(output, target, from_logits=False):
    '''Categorical crossentropy between an output tensor
    and a target tensor, where the target is an integer tensor.
    '''
    # Note: tf.nn.softmax_cross_entropy_with_logits
    # expects logits, Keras expects probabilities.
    if not from_logits:
        epsilon = _to_tensor(_EPSILON, output.dtype.base_dtype)
        output = tf.clip_by_value(output, epsilon, 1 - epsilon)
        output = tf.log(output)

    output_shape = output.get_shape()
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=tf.reshape(output, [-1, int(output_shape[-1])]),
        labels=cast(flatten(target), 'int64'))
    if len(output_shape) == 3:
        # if our output includes timesteps we need to reshape
        return tf.reshape(res, [-1, int(output_shape[-2])])
    else:
        return res
Project: keras    Author: GeekLiB    | Project source | File source
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type bool. output_i is True if
        targets_i is within top-k values of predictions_i
    '''
    return tf.nn.in_top_k(predictions, targets, k)
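
A small self-contained check of the semantics, with hand-picked values (not from the original project):

import tensorflow as tf

preds = tf.constant([[0.1, 0.8, 0.1],
                     [0.5, 0.2, 0.3]], dtype=tf.float32)
targets = tf.constant([1, 1], dtype=tf.int64)
hits = tf.nn.in_top_k(preds, targets, k=2)
with tf.Session() as sess:
    print(sess.run(hits))  # [ True False]: 0.8 is top-1, but 0.2 is only rank 3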


# CONVOLUTIONS
Project: blitznet    Author: dvornikita    | Project source | File source
def data_augmentation(img, gt_bboxes, gt_cats, seg, config):
    params = config['train_augmentation']
    img = apply_with_random_selector(
        img,
        lambda x, ordering: photometric_distortions(x, ordering, params),
        num_cases=4)

    if seg is not None:
        img = tf.concat([img, tf.cast(seg, tf.float32)], axis=-1)

    img, gt_bboxes, gt_cats = scale_distortions(img, gt_bboxes, gt_cats,
                                                params)
    img, gt_bboxes = mirror_distortions(img, gt_bboxes, params)
    # XXX reference implementation also randomizes interpolation method
    img_size = config['image_size']
    img_out = tf.image.resize_images(img[..., :3], [img_size, img_size])
    gt_bboxes, gt_cats = filter_small_gt(gt_bboxes, gt_cats, 2/config['image_size'])

    if seg is not None:
        seg_shape = config['fm_sizes'][0]
        seg = tf.expand_dims(tf.expand_dims(img[..., 3], 0), -1)
        seg = tf.squeeze(tf.image.resize_nearest_neighbor(seg, [seg_shape, seg_shape]))
        seg = tf.cast(tf.round(seg), tf.int64)
    return img_out, gt_bboxes, gt_cats, seg
Project: zhusuan    Author: thu-ml    | Project source | File source
def assert_same_float_and_int_dtype(tensors_with_name, dtype=None):
    """
    Check that all tensors in `tensors_with_name` have the same dtype, and
    that the dtype is a floating (or integer) type.

    :param tensors_with_name: A list of (tensor, tensor_name).
    :param dtype: Expected type. If `None`, depend on the type of tensors.
    :return: The type of `tensors`.
    """

    available_types = [tf.float16, tf.float32, tf.float64,
                       tf.int16, tf.int32, tf.int64]
    if dtype is None:
        return assert_same_specific_dtype(tensors_with_name, available_types)
    elif dtype in available_types:
        return assert_same_dtype(tensors_with_name, dtype)
    else:
        raise TypeError("The argument 'dtype' must be in %s" % available_types)
Project: dynamic-training-bench    Author: galeone    | Project source | File source
def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.
        Args:
          logits: Logits from get().
          labels: Labels from train_inputs or inputs(). 1-D tensor
                  of shape [batch_size]

        Returns:
          Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # Calculate the average cross entropy loss across the batch.
            labels = tf.cast(labels, tf.int64)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels, name='cross_entropy_per_example')
            cross_entropy_mean = tf.reduce_mean(
                cross_entropy, name='cross_entropy')
            tf.add_to_collection(LOSSES, cross_entropy_mean)

            # The total loss is defined as the cross entropy loss plus all of the weight
            # decay terms (L2 loss).
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Project: PixelDCN    Author: HongyangGao    | Project source | File source
def cal_loss(self):
        one_hot_labels = tf.one_hot(
            self.labels, depth=self.conf.class_num,
            axis=self.channel_axis, name='labels/one_hot')
        losses = tf.losses.softmax_cross_entropy(
            one_hot_labels, self.predictions, scope='loss/losses')
        self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
        self.decoded_preds = tf.argmax(
            self.predictions, self.channel_axis, name='accuracy/decode_pred')
        correct_prediction = tf.equal(
            self.labels, self.decoded_preds,
            name='accuracy/correct_pred')
        self.accuracy_op = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
            name='accuracy/accuracy_op')
        # weights = tf.cast(
        #     tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
        #     tf.int32, name='m_iou/weights')
        weights = tf.cast(
            tf.less(self.labels, self.conf.channel, name='m_iou/less'),
            tf.int64, name='m_iou/weights')
        labels = tf.multiply(self.labels, weights, name='m_iou/mul')
        self.m_iou, self.miou_op = tf.metrics.mean_iou(
            self.labels, self.decoded_preds, self.conf.class_num,
            weights, name='m_iou/m_ious')
Project: TensorFlow-VAE    Author: dancsalo    | Project source | File source
def read_and_decode(self, example_serialized):
        """ Read and decode binarized, raw MNIST dataset from .tfrecords file generated by MNIST.py """
        num = self.flags['num_classes']

        # Parse features from binary file
        features = tf.parse_single_example(
            example_serialized,
            features={
                'image': tf.FixedLenFeature([], tf.string),
                'label': tf.FixedLenFeature([num], tf.int64, default_value=[-1] * num),
                'height': tf.FixedLenFeature([], tf.int64),
                'width': tf.FixedLenFeature([], tf.int64),
                'depth': tf.FixedLenFeature([], tf.int64),
            })
        # Return the converted data
        label = features['label']
        image = tf.decode_raw(features['image'], tf.float32)
        image.set_shape([784])
        image = tf.reshape(image, [28, 28, 1])
        image = (image - 0.5) * 2  # max value = 1, min value = -1
        return image, tf.cast(label, tf.int32)
Project: TensorFlow-VAE    Author: dancsalo    | Project source | File source
def read_and_decode(self, example_serialized):
        """ Read and decode binarized, raw MNIST dataset from .tfrecords file generated by MNIST.py """
        features = tf.parse_single_example(
            example_serialized,
            features={
                'image': tf.FixedLenFeature([], tf.string),
                'label': tf.FixedLenFeature([self.flags['num_classes']], tf.int64, default_value=[-1]*self.flags['num_classes']),
                'height': tf.FixedLenFeature([], tf.int64),
                'width': tf.FixedLenFeature([], tf.int64),
                'depth': tf.FixedLenFeature([], tf.int64),
            })
        # now return the converted data
        label = features['label']
        image = tf.decode_raw(features['image'], tf.float32)
        image.set_shape([784])
        image = tf.reshape(image, [28, 28, 1])
        image = (image - 0.5) * 2  # max value = 1, min value = -1
        return image, tf.cast(label, tf.int32)
Project: facial-emotion-detection-dl    Author: dllatas    | Project source | File source
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: IDNNs    Author: ravidziv    | Project source | File source
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
Project: deep_learning_study    Author: jowettcz    | Project source | File source
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: aboleth    Author: data61    | Project source | File source
def _impute2D(self, X_2D):
        r"""Mean impute a rank 2 tensor."""
        # Fill zeros in for missing data initially
        data_zeroed_missing_tf = X_2D * self.real_val_mask

        # Sum the real values in each column
        col_tot = tf.reduce_sum(data_zeroed_missing_tf, 0)

        # Divide column totals by the number of non-nan values
        num_values_col = tf.reduce_sum(self.real_val_mask, 0)
        num_values_col = tf.maximum(num_values_col,
                                    tf.ones(tf.shape(num_values_col)))
        col_nan_means = tf.div(col_tot, num_values_col)

        # Make a vector of the impute values for each missing point
        imputed_vals = tf.gather(col_nan_means, self.missing_ind[:, 1])

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing_tf), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing_tf + missing_imputed

        return X_with_impute
Project: aboleth    Author: data61    | Project source | File source
def _impute2D(self, X_2D):
        r"""Randomly impute a rank 2 tensor."""
        # Fill zeros in for missing data initially
        data_zeroed_missing_tf = X_2D * self.real_val_mask

        # Divide column totals by the number of non-nan values
        col_draws = [n.sample(seed=next(seedgen)) for n in self.normal_array]
        # Make a vector of the impute values for each missing point
        imputed_vals = tf.gather(col_draws, self.missing_ind[:, 1])

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing_tf), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing_tf + missing_imputed

        return X_with_impute
Project: sonnet    Author: deepmind    | Project source | File source
def testIsIterable(self):
    self.assertTrue(base_info._is_iterable((1, 2, 3)))
    self.assertTrue(base_info._is_iterable([1, 2, 3]))
    self.assertTrue(base_info._is_iterable({1: 1, 2: 2, 3: 3}))
    self.assertTrue(base_info._is_iterable(
        collections.OrderedDict([(1, 1), (2, 2)])))
    self.assertTrue(base_info._is_iterable(DumbNamedTuple(1, 2)))
    tensor = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    self.assertFalse(base_info._is_iterable(set([1, 2, 3])))
    self.assertFalse(base_info._is_iterable(tensor))
    sparse_tensor = tf.SparseTensor(
        indices=tf.placeholder(dtype=tf.int64, shape=(10, 2,)),
        values=tf.placeholder(dtype=tf.float32, shape=(10,)),
        dense_shape=tf.placeholder(dtype=tf.int64, shape=(2,)))
    self.assertFalse(base_info._is_iterable(sparse_tensor))
    self.assertFalse(base_info._is_iterable(NotATensor()))
    self.assertFalse(base_info._is_iterable("foo"))
    def generator():
      for count in xrange(3):
        self.assertFalse(False)
        yield count
    self.assertFalse(base_info._is_iterable(generator))
Project: sonnet    Author: deepmind    | Project source | File source
def testModuleInfo_sparsetensor(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    sparse_tensor = tf.SparseTensor(
        indices=tf.placeholder(dtype=tf.int64, shape=(10, 2,)),
        values=tf.placeholder(dtype=tf.float32, shape=(10,)),
        dense_shape=tf.placeholder(dtype=tf.int64, shape=(2,)))
    dumb(sparse_tensor)
    def check():
      sonnet_collection = tf.get_default_graph().get_collection(
          base_info.SONNET_COLLECTION_NAME)
      connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
      self.assertIsInstance(
          connected_subgraph.inputs["inputs"], tf.SparseTensor)
      self.assertIsInstance(connected_subgraph.outputs, tf.SparseTensor)
    check()
    _copy_default_graph()
    check()
Project: tensorflow_face    Author: ZhihengCV    | Project source | File source
def parse_example_proto(example_serialized):
    """Parses an Example proto containing a training example of an image.

       The output of the build_image_data.py image preprocessing script is a dataset
       containing serialized Example protocol buffers.
    """
    # Dense features in Example proto.
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
                                            default_value=''),
        'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
                                                default_value=-1),
    }

    with tf.name_scope('decode_tfrecord'):
        features = tf.parse_single_example(example_serialized, feature_map)
        image = decode_jpeg(features['image/encoded'])
        label = tf.cast(features['image/class/label'], dtype=tf.int32)

        return image, label
Project: web_page_classification    Author: yuhui-lin    | Project source | File source
def loss(logits, labels):
    """Add L2Loss to all the trainable variables.
    Add summary for "Loss" and "Loss/avg".
    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor
                of shape [batch_size]
    Returns:
        Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels,
        logits=logits,
        name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: transform    Author: tensorflow    | Project source | File source
def testUniquesAnalyzerWithTokenization(self):
    def preprocessing_fn(inputs):
      return {
          'index': tft.string_to_int(tf.string_split(inputs['a']))
      }

    input_data = [{'a': 'hello hello world'}, {'a': 'hello goodbye world'}]
    input_metadata = dataset_metadata.DatasetMetadata({
        'a': sch.ColumnSchema(tf.string, [], sch.FixedColumnRepresentation())
    })
    expected_data = [{'index': [0, 0, 1]}, {'index': [0, 2, 1]}]
    expected_metadata = dataset_metadata.DatasetMetadata({
        'index': sch.ColumnSchema(
            sch.IntDomain(tf.int64, -1, 2, True,
                          'vocab_string_to_int_uniques'),
            [None], sch.ListColumnRepresentation())
    })
    self.assertAnalyzeAndTransformResults(
        input_data, input_metadata, preprocessing_fn, expected_data,
        expected_metadata)
Project: transform    Author: tensorflow    | Project source | File source
def test_example_proto_coder_error(self):
    input_schema = dataset_schema.from_feature_spec({
        '2d_vector_feature': tf.FixedLenFeature(shape=[2, 2], dtype=tf.int64),
    })
    coder = example_proto_coder.ExampleProtoCoder(input_schema)

    example_decoded_value = {
        '2d_vector_feature': [1, 2, 3]
    }
    example_proto_text = """
    features {
      feature { key: "1d_vector_feature"
                value { int64_list { value: [ 1, 2, 3 ] } } }
    }
    """
    example = tf.train.Example()
    text_format.Merge(example_proto_text, example)

    # Ensure that we raise an exception for trying to encode invalid data.
    with self.assertRaisesRegexp(ValueError, 'got wrong number of values'):
      _ = coder.encode(example_decoded_value)

    # Ensure that we raise an exception for trying to parse invalid data.
    with self.assertRaisesRegexp(ValueError, 'got wrong number of values'):
      _ = coder.decode(example.SerializeToString())
Project: transform    Author: tensorflow    | Project source | File source
def test_valency(self):
    data = ('11|12,"this is a ,text",categorical_value|other_value,1|3,89.0|'
            '91.0,12.0|15.0,False')
    feature_spec = self._INPUT_SCHEMA.as_feature_spec().copy()
    feature_spec['numeric1'] = tf.FixedLenFeature(shape=[2], dtype=tf.int64)
    schema = dataset_schema.from_feature_spec(feature_spec)
    multivalent_columns = ['numeric1', 'numeric2', 'y']
    coder = csv_coder.CsvCoder(self._COLUMNS, schema,
                               delimiter=',', secondary_delimiter='|',
                               multivalent_columns=multivalent_columns)
    expected_decoded = {'category1': ['categorical_value|other_value'],
                        'numeric1': [11, 12],
                        'numeric2': [89.0, 91.0],
                        'boolean1': [False],
                        'text1': 'this is a ,text',
                        'y': ([1, 3], [12.0, 15.0])}
    self._assert_encode_decode(coder, data, expected_decoded)

  # Test successful decoding with a single column.
Project: transform    Author: tensorflow    | Project source | File source
def testInferFeatureSchema(self):
    d = tf.placeholder(tf.int64, None)
    tensors = {
        'a': tf.placeholder(tf.float32, (None,)),
        'b': tf.placeholder(tf.string, (1, 2, 3)),
        'c': tf.placeholder(tf.int64, None),
        'd': d
    }
    d_column_schema = sch.ColumnSchema(tf.int64, [1, 2, 3],
                                       sch.FixedColumnRepresentation())
    api.set_column_schema(d, d_column_schema)
    schema = impl_helper.infer_feature_schema(tf.get_default_graph(), tensors)
    expected_schema = sch.Schema(column_schemas={
        'a': sch.ColumnSchema(tf.float32, [],
                              sch.FixedColumnRepresentation()),
        'b': sch.ColumnSchema(tf.string, [2, 3],
                              sch.FixedColumnRepresentation()),
        'c': sch.ColumnSchema(tf.int64, None,
                              sch.FixedColumnRepresentation()),
        'd': sch.ColumnSchema(tf.int64, [1, 2, 3],
                              sch.FixedColumnRepresentation())
    })
    self.assertEqual(schema, expected_schema)
Project: transform    Author: tensorflow    | Project source | File source
def _from_sparse_feature_dict(feature_dict):
  """Translate a JSON sparse feature dict into a ColumnSchema."""
  # assume there is only one value column
  value_feature = feature_dict['valueFeature'][0]
  domain = _from_domain_dict(value_feature['domain'])

  index_feature_dicts = feature_dict['indexFeature']

  # int() is needed because protobuf JSON encodes int64 as string
  axes = [sch.Axis(int(index_feature_dict['size']))
          for index_feature_dict in index_feature_dicts]

  value_field_name = value_feature['name']
  index_fields = [sch.SparseIndexField(index_feature_dict['name'],
                                       index_feature_dict['isSorted'])
                  for index_feature_dict in index_feature_dicts]

  representation = sch.SparseColumnRepresentation(value_field_name,
                                                  index_fields)

  return sch.ColumnSchema(domain, axes, representation)
Project: transform    Author: tensorflow    | Project source | File source
def _from_domain_dict(domain):
  """Translate a JSON domain dict into a Domain."""
  if domain.get('ints') is not None:
    def maybe_to_int(s):
      return int(s) if s is not None else None
    return sch.IntDomain(
        tf.int64,
        maybe_to_int(domain['ints'].get('min')),
        maybe_to_int(domain['ints'].get('max')),
        domain['ints'].get('isCategorical'),
        domain['ints'].get('vocabularyFile', ''))
  if domain.get('floats') is not None:
    return sch.FloatDomain(tf.float32)
  if domain.get('strings') is not None:
    return sch.StringDomain(tf.string)
  if domain.get('bools') is not None:
    return sch.BoolDomain(tf.bool)
  raise ValueError('Unknown domain: {}'.format(domain))
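
For illustration, a hypothetical input exercising the IntDomain branch; protobuf JSON encodes int64 bounds as strings, which is why maybe_to_int converts them:

domain = _from_domain_dict(
    {'ints': {'min': '-1', 'max': '2', 'isCategorical': True}})
# Equivalent to sch.IntDomain(tf.int64, -1, 2, True, '')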
Project: transform    Author: tensorflow    | Project source | File source
def _make_raw_schema(shape, should_add_unused_feature=False):
  schema = sch.Schema()

  schema.column_schemas['raw_a'] = (sch.ColumnSchema(
      tf.int64, shape, sch.FixedColumnRepresentation(default_value=0)))

  schema.column_schemas['raw_b'] = (sch.ColumnSchema(
      tf.int64, shape, sch.FixedColumnRepresentation(default_value=1)))

  schema.column_schemas['raw_label'] = (sch.ColumnSchema(
      tf.int64, shape, sch.FixedColumnRepresentation(default_value=-1)))

  if should_add_unused_feature:
    schema.column_schemas['raw_unused'] = (sch.ColumnSchema(
        tf.int64, shape, sch.FixedColumnRepresentation(default_value=1)))

  return schema