Python tensorflow module: parse_single_example() code examples

We collected the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.parse_single_example().
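
Before the project snippets, here is a minimal, self-contained sketch of the call itself (TF 1.x API; the feature names 'label' and 'img_raw' are hypothetical, chosen only to mirror the examples below): tf.parse_single_example takes one serialized tf.train.Example plus a dict mapping feature names to FixedLenFeature/VarLenFeature specs, and returns a dict of tensors.

import tensorflow as tf

# Build one serialized tf.train.Example in memory (hypothetical feature names).
example = tf.train.Example(features=tf.train.Features(feature={
    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[7])),
    'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[b'\x00\x01\x02'])),
}))
serialized = example.SerializeToString()

# Declare the expected schema and parse the single record into a dict of tensors.
parsed = tf.parse_single_example(
    serialized,
    features={
        'label': tf.FixedLenFeature([], tf.int64),
        'img_raw': tf.FixedLenFeature([], tf.string),
    })

with tf.Session() as sess:
    print(sess.run(parsed))  # {'label': 7, 'img_raw': b'\x00\x01\x02'}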

Project: ISLES2017    Author: MiguelMonteiro    | project source | file source
def parse_example(serialized_example):
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'shape': tf.FixedLenFeature([], tf.string),
            'img_raw': tf.FixedLenFeature([], tf.string),
            'gt_raw': tf.FixedLenFeature([], tf.string),
            'example_name': tf.FixedLenFeature([], tf.string)
        })

    with tf.variable_scope('decoder'):
        shape = tf.decode_raw(features['shape'], tf.int32)
        image = tf.decode_raw(features['img_raw'], tf.float32)
        ground_truth = tf.decode_raw(features['gt_raw'], tf.uint8)
        example_name = features['example_name']

    with tf.variable_scope('image'):
        # reshape and add 0 dimension (would be batch dimension)
        image = tf.expand_dims(tf.reshape(image, shape), 0)
    with tf.variable_scope('ground_truth'):
        # reshape
        ground_truth = tf.cast(tf.reshape(ground_truth, shape[:-1]), tf.float32)
    return image, ground_truth, example_name
Project: AC-GAN    Author: jianpingliu    | project source | file source
def read_example(self, filename_queue):
        # TFRecord reader
        reader = tf.TFRecordReader()
        key, serialized_example = reader.read(filename_queue)

        # read data from serialized examples
        features = tf.parse_single_example(
            serialized_example,
            features={
                'label': tf.FixedLenFeature([], tf.int64),
                'image_raw': tf.FixedLenFeature([], tf.string)
            })
        label = features['label']
        image = features['image_raw']

        # decode raw image data as integers
        if self.image_format == 'jpeg':
            decoded_image = tf.image.decode_jpeg(
                image, channels=self.image_channels)
        else:
            decoded_image = tf.decode_raw(image, tf.uint8)

        return decoded_image, label
Project: hdrnet_legacy    Author: mgharbi    | project source | file source
def _parse_example(self, serialized):
    """Unpack a serialized example to Tensor."""
    feats = self._get_data_features()
    sz_feats = self._get_sz_features()
    for s in sz_feats:
      feats[s] = sz_feats[s]
    sample = tf.parse_single_example(serialized, features=feats)

    data = {}
    for i, f in enumerate(self.FEATURES):
      s = tf.to_int32(sample[f+'_sz'])

      data[f] = tf.decode_raw(sample[f], self.dtypes[f], name='decode_{}'.format(f))
      data[f] = tf.reshape(data[f], s)

    return data
Project: TFExperiments    Author: gnperdue    | project source | file source
def parse_mnist_tfrec(tfrecord, features_shape):
    tfrecord_features = tf.parse_single_example(
        tfrecord,
        features={
            'features': tf.FixedLenFeature([], tf.string),
            'targets': tf.FixedLenFeature([], tf.string)
        }
    )
    features = tf.decode_raw(tfrecord_features['features'], tf.uint8)
    features = tf.reshape(features, features_shape)
    features = tf.cast(features, tf.float32)
    targets = tf.decode_raw(tfrecord_features['targets'], tf.uint8)
    targets = tf.reshape(targets, [])
    targets = tf.one_hot(indices=targets, depth=10, on_value=1, off_value=0)
    targets = tf.cast(targets, tf.float32)
    return features, targets
Project: TFExperiments    Author: gnperdue    | project source | file source
def parse_mnist_tfrec(tfrecord, name, features_shape, scalar_targs=False):
    tfrecord_features = tf.parse_single_example(
        tfrecord,
        features={
            'features': tf.FixedLenFeature([], tf.string),
            'targets': tf.FixedLenFeature([], tf.string)
        },
        name=name+'_data'
    )
    with tf.variable_scope('features'):
        features = tf.decode_raw(
            tfrecord_features['features'], tf.uint8
        )
        features = tf.reshape(features, features_shape)
        features = tf.cast(features, tf.float32)
    with tf.variable_scope('targets'):
        targets = tf.decode_raw(tfrecord_features['targets'], tf.uint8)
        if scalar_targs:
            targets = tf.reshape(targets, [])
        targets = tf.one_hot(
            indices=targets, depth=10, on_value=1, off_value=0
        )
        targets = tf.cast(targets, tf.float32)
    return features, targets
Project: tf-sr-zoo    Author: MLJejuCamp2017    | project source | file source
def read_and_decode(filename_queue, batch_size):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    feature = features()
    feature = tf.parse_single_example(
        serialized_example,
        features=feature)
    hr_image = tf.decode_raw(feature['hr_image'], tf.uint8)
    height = tf.cast(feature['height'], tf.int32)
    width = tf.cast(feature['width'], tf.int32)
    print(height)
    image_shape = tf.stack([128, 128, 3])
    hr_image = tf.reshape(hr_image, image_shape)
    hr_image = tf.image.random_flip_left_right(hr_image)
    hr_image = tf.image.random_contrast(hr_image, 0.5, 1.3)
    hr_images = tf.train.shuffle_batch([hr_image], batch_size=batch_size, capacity=30,
                                       num_threads=2, min_after_dequeue=10)
    return hr_images
Project: AssociativeRetrieval    Author: jxwufan    | project source | file source
def read(self, shuffle=True, num_epochs=None):
    with tf.name_scope('input'):
      reader = tf.TFRecordReader()
      filename_queue = tf.train.string_input_producer([self.filename], num_epochs=num_epochs)
      _, serialized_input = reader.read(filename_queue)
      inputs = tf.parse_single_example(serialized_input,
                                       features={
                                       'inputs_seq': tf.FixedLenFeature([self.seq_len * 2 + 3], tf.int64),
                                       'output': tf.FixedLenFeature([1], tf.int64)
                                       })
      inputs_seq = inputs['inputs_seq']
      output = inputs['output']
      min_after_dequeue = 100
      if shuffle:
        inputs_seqs, outputs = tf.train.shuffle_batch(
            [inputs_seq, output], batch_size=self.batch_size, num_threads=2,
            capacity=min_after_dequeue + 3 * self.batch_size,
            min_after_dequeue=min_after_dequeue)
      else:
        inputs_seqs, outputs = tf.train.batch([inputs_seq, output], batch_size=self.batch_size)
      return inputs_seqs, outputs
Project: TensorFlow-VAE    Author: dancsalo    | project source | file source
def read_and_decode(self, example_serialized):
        """ Read and decode binarized, raw MNIST dataset from .tfrecords file generated by MNIST.py """
        num = self.flags['num_classes']

        # Parse features from binary file
        features = tf.parse_single_example(
            example_serialized,
            features={
                'image': tf.FixedLenFeature([], tf.string),
                'label': tf.FixedLenFeature([num], tf.int64, default_value=[-1] * num),
                'height': tf.FixedLenFeature([], tf.int64),
                'width': tf.FixedLenFeature([], tf.int64),
                'depth': tf.FixedLenFeature([], tf.int64),
            })
        # Return the converted data
        label = features['label']
        image = tf.decode_raw(features['image'], tf.float32)
        image.set_shape([784])
        image = tf.reshape(image, [28, 28, 1])
        image = (image - 0.5) * 2  # max value = 1, min value = -1
        return image, tf.cast(label, tf.int32)
Project: TensorFlow-VAE    Author: dancsalo    | project source | file source
def read_and_decode(self, example_serialized):
        """ Read and decode binarized, raw MNIST dataset from .tfrecords file generated by MNIST.py """
        features = tf.parse_single_example(
            example_serialized,
            features={
                'image': tf.FixedLenFeature([], tf.string),
                'label': tf.FixedLenFeature([self.flags['num_classes']], tf.int64, default_value=[-1]*self.flags['num_classes']),
                'height': tf.FixedLenFeature([], tf.int64),
                'width': tf.FixedLenFeature([], tf.int64),
                'depth': tf.FixedLenFeature([], tf.int64),
            })
        # now return the converted data
        label = features['label']
        image = tf.decode_raw(features['image'], tf.float32)
        image.set_shape([784])
        image = tf.reshape(image, [28, 28, 1])
        image = (image - 0.5) * 2  # max value = 1, min value = -1
        return image, tf.cast(label, tf.int32)
Project: dahoam2017    Author: KarimJedda    | project source | file source
def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      features={
          'image_raw': tf.FixedLenFeature([], tf.string),
          'label_raw': tf.FixedLenFeature([], tf.string),
      })
  image = tf.decode_raw(features['image_raw'], tf.int16)
  image.set_shape([IMAGE_HEIGHT * IMAGE_WIDTH])
  image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
  reshape_image = tf.reshape(image, [IMAGE_HEIGHT, IMAGE_WIDTH, 1])
  label = tf.decode_raw(features['label_raw'], tf.uint8)
  label.set_shape([CHARS_NUM * CLASSES_NUM])
  reshape_label = tf.reshape(label, [CHARS_NUM, CLASSES_NUM])
  return tf.cast(reshape_image, tf.float32), tf.cast(reshape_label, tf.float32)
Project: tensorflow_face    Author: ZhihengCV    | project source | file source
def parse_example_proto(example_serialized):
    """Parses an Example proto containing a training example of an image.

       The output of the build_image_data.py image preprocessing script is a dataset
       containing serialized Example protocol buffers.
    """
    # Dense features in Example proto.
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
                                            default_value=''),
        'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
                                                default_value=-1),
    }

    with tf.name_scope('decode_tfrecord'):
        features = tf.parse_single_example(example_serialized, feature_map)
        image = decode_jpeg(features['image/encoded'])
        label = tf.cast(features['image/class/label'], dtype=tf.int32)

        return image, label
Project: text-classification2    Author: yuhui-lin    | project source | file source
def read_and_decode_embedding(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'label': tf.FixedLenFeature(
                [], tf.int64),
            'sequence_raw': tf.FixedLenFeature(
                [], tf.string),
        })
    sequence = features['sequence_raw']

    # preprocess
    s_decode = tf.decode_raw(sequence, tf.int32)
    s_decode.set_shape([FLAGS.embed_length])

    # Convert label from a scalar uint8 tensor to an int32 scalar.
    label = tf.cast(features['label'], tf.int32)

    return s_decode, label
Project: source_separation_ml_jeju    Author: hjkwon0609    | project source | file source
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    features = tf.parse_single_example(serialized_example, features={
        'song_spec': tf.FixedLenFeature([], tf.string),
        'voice_spec': tf.FixedLenFeature([], tf.string),
        'mixed_spec': tf.FixedLenFeature([], tf.string)
        })

    song_spec = transform_spec_from_raw(features['song_spec'])
    voice_spec = transform_spec_from_raw(features['voice_spec'])
    mixed_spec = transform_spec_from_raw(features['mixed_spec'])

    input_spec = stack_spectrograms(mixed_spec)  # this will be the input

    target_spec = tf.concat([song_spec, voice_spec], axis=1) # target spec is going to be a concatenation of song_spec and voice_spec

    return input_spec, target_spec
Project: cnn_picture_gazebo    Author: liuyandong1988    | project source | file source
def decode_from_tfrecords(filename, num_epoch=None):
    # create a filename queue; num_epoch limits how many passes are made over the data
    filename_queue = tf.train.string_input_producer([filename], num_epochs=num_epoch)
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    example = tf.parse_single_example(serialized, features={
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'nchannel': tf.FixedLenFeature([], tf.int64),
        'image': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64)
    })
    label = tf.cast(example['label'], tf.int32)
    image = tf.decode_raw(example['image'], tf.uint8)
    image = tf.reshape(image, tf.stack([
        tf.cast(example['height'], tf.int32),
        tf.cast(example['width'], tf.int32),
        tf.cast(example['nchannel'], tf.int32)]))
    return image, label
Project: keras_experiments    Author: avolkov1    | project source | file source
def _deserialize_image_record(cls, record):
        feature_map = {
            'image/encoded': tf.FixedLenFeature([], tf.string, ''),
            'image/class/label': tf.FixedLenFeature([1], tf.int64, -1),
            'image/class/text': tf.FixedLenFeature([], tf.string, ''),
            'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32)
        }
        with tf.name_scope('deserialize_image_record'):
            obj = tf.parse_single_example(record, feature_map)
            imgdata = obj['image/encoded']
            label = tf.cast(obj['image/class/label'], tf.int32)
            bbox = tf.stack([obj['image/object/bbox/%s' % x].values
                             for x in ['ymin', 'xmin', 'ymax', 'xmax']])
            bbox = tf.transpose(tf.expand_dims(bbox, 0), [0, 2, 1])
            text = obj['image/class/text']
            return imgdata, label, bbox, text
Project: tensorflow-yys    Author: ystyle    | project source | file source
def read_and_decode(filename, batch_size):
    # create a filename queue from the given file
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)  # returns the file name and the serialized example
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        }
    )
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    print('image shape: ', img.get_shape())
    img = tf.reshape(img, [512, 144, 3])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)
    image_batch, label_batch = tf.train.batch([img, label],
                                              batch_size=batch_size,
                                              num_threads=64,
                                              capacity=2000)
    return image_batch, tf.reshape(label_batch, [batch_size])
Project: apparent-age-gender-classification    Author: danielyou0230    | project source | file source
def read_and_decode(filename, img_size=128, depth=1):
    if not filename.endswith('.tfrecords'):
        print("Invalid file \"{:s}\"".format(filename))
        return [], []
    else:
        data_queue = tf.train.string_input_producer([filename])

        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(data_queue) 
        features = tf.parse_single_example(serialized_example,
                   features={
                             'label'   : tf.FixedLenFeature([], tf.int64),
                             'img_raw' : tf.FixedLenFeature([], tf.string),
                            })

        img = tf.decode_raw(features['img_raw'], tf.uint8)
        img = tf.reshape(img, [img_size, img_size, depth])
        # Normalize the image
        img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
        label = tf.cast(features['label'], tf.int32)
        label_onehot = tf.stack(tf.one_hot(label, n_classes))
        return img, label_onehot
#read_and_decode('test.tfrecords')
Project: apparent-age-gender-classification    Author: danielyou0230    | project source | file source
def read_and_decode(filename, img_size=128, depth=1):
    if not filename.endswith('.tfrecords'):
        print("Invalid file \"{:s}\"".format(filename))
        return [], []
    else:
        data_queue = tf.train.string_input_producer([filename])

        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(data_queue) 
        features = tf.parse_single_example(serialized_example,
                   features={
                             'label'   : tf.FixedLenFeature([], tf.int64),
                             'img_raw' : tf.FixedLenFeature([], tf.string),
                            })
        img = tf.decode_raw(features['img_raw'], tf.uint8)
        img = tf.reshape(img, [img_size, img_size, depth])
        # Normalize the image
        img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
        label = tf.cast(features['label'], tf.int32)
        label_onehot = tf.stack(tf.one_hot(label, n_classes))
        return img, label_onehot
#read_and_decode('test.tfrecords')
Project: apparent-age-gender-classification    Author: danielyou0230    | project source | file source
def read_and_decode(filename, img_size=128, depth=1):
    if not filename.endswith('.tfrecords'):
        print("Invalid file \"{:s}\"".format(filename))
        return [], []
    else:
        data_queue = tf.train.string_input_producer([filename])

        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(data_queue) 
        features = tf.parse_single_example(serialized_example,
                   features={
                             'label'   : tf.FixedLenFeature([], tf.int64),
                             'img_raw' : tf.FixedLenFeature([], tf.string),
                            })

        img = tf.decode_raw(features['img_raw'], tf.uint8)
        img = tf.reshape(img, [img_size, img_size, depth])
        # Normalize the image
        img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
        label = tf.cast(features['label'], tf.int32)
        label_onehot = tf.stack(tf.one_hot(label, n_classes))
        return img, label_onehot
Project: age-gender-classification    Author: yunsangq    | project source | file source
def parse_example_proto(example_serialized):
    # Dense features in Example proto.
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
                                            default_value=''),
        'image/filename': tf.FixedLenFeature([], dtype=tf.string,
                                             default_value=''),

        'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
                                                default_value=-1),
        'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
                                               default_value=''),
        'image/height': tf.FixedLenFeature([1], dtype=tf.int64,
                                           default_value=-1),
        'image/width': tf.FixedLenFeature([1], dtype=tf.int64,
                                          default_value=-1),

    }

    features = tf.parse_single_example(example_serialized, feature_map)
    label = tf.cast(features['image/class/label'], dtype=tf.int32)
    return features['image/encoded'], label, features['image/filename']
Project: Super_TF    Author: Dhruv-Mohan    | project source | file source
def single_read(self):
        features = tf.parse_single_example(self.serialized_example, features=self._Feature_dict)
        image = tf.image.decode_image(features[self._Image_handle])
        image.set_shape(self.image_shape)
        image = tf.image.convert_image_dtype(image, tf.float32)
        image = image - self.mean_image
        #Alright we've got images, now to get seqs and masks
        complete_seq =  features[self._Seq_handle]
        complete_mask = features[self._Seq_mask]
        '''
        decoded_seq = self.get_seq(complete_seq)
        decoded_mask = self.get_seq(complete_mask)

        sequence_length = len(complete_seq)
        input_seq = decoded_seq[0:sequence_length-1]
        target_seq = decoded_seq[1:sequence_length]
        final_mask = decoded_mask[0:sequence_length-1]
        '''
        return image, complete_seq, complete_mask
Project: faststyle    Author: ghwatson    | project source | file source
def read_my_file_format(filename_queue, resize_shape=None):
    """Sets up part of the pipeline that takes elements from the filename queue
    and turns it into a tf.Tensor of a batch of images.

    :param filename_queue:
        tf.train.string_input_producer object
    :param resize_shape:
        2 element list defining the shape to resize images to.
    """
    reader = tf.TFRecordReader()
    key, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example, features={
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/height': tf.FixedLenFeature([], tf.int64),
            'image/channels': tf.FixedLenFeature([], tf.int64),
            'image/width': tf.FixedLenFeature([], tf.int64)})
    example = tf.image.decode_jpeg(features['image/encoded'], 3)
    processed_example = preprocessing(example, resize_shape)
    return processed_example
Project: yolo-tf    Author: ruiminshen    | project source | file source
def decode_image_objects(paths):
    with tf.name_scope(inspect.stack()[0][3]):
        with tf.name_scope('parse_example'):
            reader = tf.TFRecordReader()
            _, serialized = reader.read(tf.train.string_input_producer(paths))
            example = tf.parse_single_example(serialized, features={
                'imagepath': tf.FixedLenFeature([], tf.string),
                'imageshape': tf.FixedLenFeature([3], tf.int64),
                'objects': tf.FixedLenFeature([2], tf.string),
            })
        imagepath = example['imagepath']
        objects = example['objects']
        with tf.name_scope('decode_objects'):
            objects_class = tf.decode_raw(objects[0], tf.int64, name='objects_class')
            objects_coord = tf.decode_raw(objects[1], tf.float32)
            objects_coord = tf.reshape(objects_coord, [-1, 4], name='objects_coord')
        with tf.name_scope('load_image'):
            imagefile = tf.read_file(imagepath)
            image = tf.image.decode_jpeg(imagefile, channels=3)
    return image, example['imageshape'], objects_class, objects_coord
Project: CellDetection    Author: quqixun    | project source | file source
def decode_record(filename_queue, patch_size,
                  channel_num=3):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'image': tf.FixedLenFeature([], tf.string),
        })

    img = tf.decode_raw(features['image'], tf.uint8)
    img = tf.reshape(img, [patch_size, patch_size, channel_num])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)

    return img, label
Project: Automatic_Speech_Recognition    Author: zzw922cn    | project source | file source
def read(filename_queue, feature_num=2, dtypes=[list, int]):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  feature_dict={}
  for i in range(feature_num):
    # here, only three data types are allowed: tf.float32, tf.int64, tf.string
    if dtypes[i] is int:
      feature_dict['feature'+str(i+1)]=tf.FixedLenFeature([], tf.int64)
    else:
      feature_dict['feature'+str(i+1)]=tf.FixedLenFeature([], tf.string)
  features = tf.parse_single_example(
      serialized_example,
      features=feature_dict)
  return features

#======================================================================================
## test code
Project: neuro-stereo    Author: lugu    | project source | file source
def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      # Defaults are not specified since both keys are required.
      features={
          'image_left': tf.FixedLenFeature([], tf.string),
          'image_right': tf.FixedLenFeature([], tf.string),
      })

  image_left = tf.decode_raw(features['image_left'], tf.uint8)
  image_right = tf.decode_raw(features['image_right'], tf.uint8)
  width = 960
  height = 540
  depth = 4
  image_left.set_shape([width*height*depth])
  image_right.set_shape([width*height*depth])

  return image_left, image_right
Project: hdrnet    Author: google    | project source | file source
def _parse_example(self, serialized):
    """Unpack a serialized example to Tensor."""
    feats = self._get_data_features()
    sz_feats = self._get_sz_features()
    for s in sz_feats:
      feats[s] = sz_feats[s]
    sample = tf.parse_single_example(serialized, features=feats)

    data = {}
    for i, f in enumerate(self.FEATURES):
      s = tf.to_int32(sample[f+'_sz'])

      data[f] = tf.decode_raw(sample[f], self.dtypes[f], name='decode_{}'.format(f))
      data[f] = tf.reshape(data[f], s)

    return data
Project: streetview    Author: ydnaandy123    | project source | file source
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
            serialized_example,
            features={
                'image_raw': tf.FixedLenFeature([], tf.string),
            })

    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape(128 * 128 * 3)
    image = tf.reshape(image, [128, 128, 3])

    image = tf.cast(image, tf.float32) * (2. / 255) - 1.

    return image
Project: streetview    Author: ydnaandy123    | project source | file source
def read_and_decode_with_labels(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
            serialized_example,
            features={
                'image_raw': tf.FixedLenFeature([], tf.string),
                'label' : tf.FixedLenFeature([], tf.int64)
            })

    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape(128 * 128 * 3)
    image = tf.reshape(image, [128, 128, 3])

    image = tf.cast(image, tf.float32) * (2. / 255) - 1.

    label = tf.cast(features['label'], tf.int32)

    return image, label
Project: num-seq-recognizer    Author: gmlove    | project source | file source
def batches(data_file_path, max_number_length, batch_size, size,
            num_preprocess_threads=1, is_training=True, channels=1):
  filename_queue = tf.train.string_input_producer([data_file_path])
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
    serialized_example,
    features={
      'image_png': tf.FixedLenFeature([], tf.string),
      'label': tf.FixedLenFeature([max_number_length], tf.int64),
      'length': tf.FixedLenFeature([1], tf.int64),
      'bbox': tf.FixedLenFeature([4], tf.int64),
    })
  image, bbox, label, length = features['image_png'], features['bbox'], features['label'], features['length']
  bbox = tf.cast(bbox, tf.int32)
  dequeued_data = []
  for i in range(num_preprocess_threads):
    dequeued_img = tf.image.decode_png(image, channels)
    dequeued_img = resize_image(dequeued_img, bbox, is_training, size, channels)
    dequeued_data.append([dequeued_img, tf.one_hot(length - 1, max_number_length)[0], tf.one_hot(label, 11)])

  return tf.train.batch_join(dequeued_data, batch_size=batch_size, capacity=batch_size * 3)
Project: neuroimage-tensorflow    Author: corticometrics    | project source | file source
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,features={
        'image_raw': tf.FixedLenFeature([], tf.string),
        'label_raw': tf.FixedLenFeature([], tf.string)})
    image  = tf.cast(tf.decode_raw(features['image_raw'], tf.int16), tf.float32)
    labels = tf.decode_raw(features['label_raw'], tf.int16)

    #PW 2017/03/03: Zero-center data here?
    image.set_shape([IMG_DIM*IMG_DIM*IMG_DIM])
    image  = tf.reshape(image, [IMG_DIM,IMG_DIM,IMG_DIM,1])

    labels.set_shape([IMG_DIM*IMG_DIM*IMG_DIM])
    labels = tf.reshape(labels, [IMG_DIM, IMG_DIM, IMG_DIM])

    # Dimensions (X, Y, Z, channels)
    return image, labels
Project: deepcake    Author: ericyue    | project source | file source
def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      features={
          "label": tf.FixedLenFeature([], tf.float32),
          "categorical_features": tf.FixedLenFeature([CATEGORICAL_FEATURES_SIZE], tf.string),
          "continuous_features": tf.FixedLenFeature([CONTINUOUS_FEATURES_SIZE], tf.float32),
      })
  label = features["label"]
  continuous_features = features["continuous_features"]
  categorical_features = tf.cast(tf.string_to_hash_bucket(features["categorical_features"], BUCKET_SIZE), tf.float32)
  return label, tf.concat([continuous_features, categorical_features], 0)


# Read serialized examples from filename queue
Project: tensorflow_fasttext    Author: apcode    | project source | file source
def test_parse_spec():
    fc = FeatureColumns(
        True,
        False,
        VOCAB_FILE,
        VOCAB_SIZE,
        10,
        10,
        1000,
        10)
    parse_spec = tf.feature_column.make_parse_example_spec(fc)
    print(parse_spec)
    reader = tf.python_io.tf_record_iterator(INPUT_FILE)
    sess = tf.Session()
    for record in reader:
        example = tf.parse_single_example(
            record,
            parse_spec)
        print(sess.run(example))
        break
Project: view-finding-network    Author: yiling-chen    | project source | file source
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
      # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
        })

    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, [227, 227, 6])

  # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    return tf.split(image, 2, 2) # 3rd dimension two parts
Project: view-finding-network    Author: yiling-chen    | project source | file source
def read_and_decode_aug(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
      # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
        })

    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.image.random_flip_left_right(tf.reshape(image, [227, 227, 6]))
  # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    image = tf.image.random_brightness(image, 0.01)
    image = tf.image.random_contrast(image, 0.95, 1.05)
    return tf.split(image, 2, 2) # 3rd dimension two parts
Project: deeplearning    Author: fanfanfeng    | project source | file source
def read_from_tfrecord(filenames):
    tfrecord_file_queue = tf.train.string_input_producer(filenames,name='queue')
    reader = tf.TFRecordReader()
    _,tfrecord_serialized = reader.read(tfrecord_file_queue)

    tfrecord_features = tf.parse_single_example(tfrecord_serialized,features={
        'label':tf.FixedLenFeature([],tf.int64),
        'shape':tf.FixedLenFeature([],tf.string),
        'image':tf.FixedLenFeature([],tf.string),
    },name='features')


    image = tf.decode_raw(tfrecord_features['image'],tf.uint8)
    shape = tf.decode_raw(tfrecord_features['shape'],tf.int32)

    image = tf.reshape(image,shape)
    label = tfrecord_features['label']
    return label,shape,image
Project: gong_an_pictures    Author: oukohou    | project source | file source
def read_decode_tfrecords(records_path, num_epochs=1020, batch_size=Flags.batch_size, num_threads=2):
    if gfile.IsDirectory(records_path):
        records_path = [os.path.join(records_path, i) for i in os.listdir(records_path)]
    else:
        records_path = [records_path]
    records_path_queue = tf.train.string_input_producer(records_path, seed=123,
                                                        num_epochs=num_epochs,
                                                        name="string_input_producer")
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(records_path_queue, name="serialized_example")
    features = tf.parse_single_example(serialized=serialized_example,
                                       features={"img_raw": tf.FixedLenFeature([], tf.string),
                                                 "label": tf.FixedLenFeature([], tf.int64),
                                                 "height": tf.FixedLenFeature([], tf.int64),
                                                 "width": tf.FixedLenFeature([], tf.int64),
                                                 "depth": tf.FixedLenFeature([], tf.int64)},
                                       name="parse_single_example")
    image = tf.decode_raw(features["img_raw"], tf.uint8, name="decode_raw")
    image.set_shape([height * width * 3])
    image = tf.cast(image, tf.float32) * (1.0 / 255) - 0.5
    label = tf.cast(features["label"], tf.int32)
    images, labels = tf.train.shuffle_batch([image, label], batch_size=batch_size, num_threads=num_threads,
                                            name="shuffle_batch", capacity=1020, min_after_dequeue=64)
    print("images' shape is :", str(images.shape))
    return images, labels
Project: gong_an_pictures    Author: oukohou    | project source | file source
def read_decode_tfrecords(records_path, num_epochs=1, batch_size=Flags.batch_size, num_threads=1):
    if gfile.IsDirectory(records_path):
        records_path = [os.path.join(records_path, i) for i in os.listdir(records_path)]
    else:
        records_path = [records_path]
    records_path_queue = tf.train.string_input_producer(records_path, seed=123,
                                                        num_epochs=None,
                                                        name="string_input_producer")
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(records_path_queue, name="serialized_example")
    features = tf.parse_single_example(serialized=serialized_example,
                                       features={"img_raw": tf.FixedLenFeature([], tf.string),
                                                 "label": tf.FixedLenFeature([], tf.int64),
                                                 "height": tf.FixedLenFeature([], tf.int64),
                                                 "width": tf.FixedLenFeature([], tf.int64),
                                                 "depth": tf.FixedLenFeature([], tf.int64)},
                                       name="parse_single_example")
    image = tf.decode_raw(features["img_raw"], tf.uint8, name="decode_raw")
    image.set_shape([IMAGE_PIXELS])
    image = tf.cast(image, tf.float32) * (1.0 / 255) - 0.5
    label = tf.cast(features["label"], tf.int32)
    images, labels = tf.train.shuffle_batch([image, label], batch_size=batch_size, num_threads=num_threads,
                                            name="shuffle_batch", capacity=1020, min_after_dequeue=50)
    return images, labels
Project: gong_an_pictures    Author: oukohou    | project source | file source
def read_decode_tfrecords(records_path, num_epochs=1020, batch_size=Flags.batch_size, num_threads=2):
    if gfile.IsDirectory(records_path):
        records_path = [os.path.join(records_path, i) for i in os.listdir(records_path)]
    else:
        records_path = [records_path]
    records_path_queue = tf.train.string_input_producer(records_path, seed=123,
                                                        # num_epochs=num_epochs,
                                                        name="string_input_producer")
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(records_path_queue, name="serialized_example")
    features = tf.parse_single_example(serialized=serialized_example,
                                       features={"img_raw": tf.FixedLenFeature([], tf.string),
                                                 "label": tf.FixedLenFeature([], tf.int64),
                                                 "height": tf.FixedLenFeature([], tf.int64),
                                                 "width": tf.FixedLenFeature([], tf.int64),
                                                 "depth": tf.FixedLenFeature([], tf.int64)},
                                       name="parse_single_example")
    image = tf.decode_raw(features["img_raw"], tf.uint8, name="decode_raw")
    image.set_shape([IMAGE_PIXELS])
    image = tf.cast(image, tf.float32) * (1.0 / 255) - 0.5
    label = tf.cast(features["label"], tf.int32)
    # images, labels = tf.train.shuffle_batch([image, label], batch_size=batch_size, num_threads=num_threads,
    #                                         name="shuffle_bath", capacity=1020, min_after_dequeue=64)
    return image, label
Project: tensorlayer-chinese    Author: shorxp    | project source | file source
def read_and_decode(filename):
    # generate a queue with a given file name
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)     # return the file and the name of file
    features = tf.parse_single_example(serialized_example,  # see parse_single_sequence_example for sequence example
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw' : tf.FixedLenFeature([], tf.string),
                                       })
    # You can do more image distortion here for training data
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [224, 224, 3])
    # img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)
    return img, label
Project: tensorlayer-chinese    Author: shorxp    | project source | file source
def read_and_decode(filename):
    # generate a queue with a given file name
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)     # return the file and the name of file
    features = tf.parse_single_example(serialized_example,  # see parse_single_sequence_example for sequence example
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw' : tf.FixedLenFeature([], tf.string),
                                       })
    # You can do more image distortion here for training data
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [224, 224, 3])
    # img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)
    return img, label
Project: video_subtitle_extract    Author: thewintersun    | project source | file source
def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      features={
          'height': tf.FixedLenFeature([], tf.int64),
          'width': tf.FixedLenFeature([], tf.int64),
          'image_raw': tf.FixedLenFeature([], tf.string),
          'label': tf.VarLenFeature(tf.int64),
      })

  image = tf.decode_raw(features['image_raw'], tf.uint8)
  image = tf.reshape(image, [730, 38])

  image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

  label = tf.cast(features['label'], tf.int32)

  return image, label
Project: video_subtitle_extract    Author: thewintersun    | project source | file source
def read_and_decode(filename_queue):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      features={
          'height': tf.FixedLenFeature([], tf.int64),
          'width': tf.FixedLenFeature([], tf.int64),
          'image_raw': tf.FixedLenFeature([], tf.string),
          'label': tf.VarLenFeature(tf.int64),
      })

  image = tf.decode_raw(features['image_raw'], tf.uint8)
  image = tf.reshape(image, [730, 38])

  image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

  label = tf.cast(features['label'], tf.int32)

  return image, label
Project: sample-cnn    Author: tae-jun    | project source | file source
def _read_example(filename_queue, n_labels=50, n_samples=59049):
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
    serialized_example,
    features={
      'raw_labels': tf.FixedLenFeature([], tf.string),
      'raw_segment': tf.FixedLenFeature([], tf.string)
    })

  segment = tf.decode_raw(features['raw_segment'], tf.float32)
  segment.set_shape([n_samples])

  labels = tf.decode_raw(features['raw_labels'], tf.uint8)
  labels.set_shape([n_labels])
  labels = tf.cast(labels, tf.float32)

  return segment, labels
Project: sample-cnn    Author: tae-jun    | project source | file source
def read_and_decode(filename, one_hot=True, n_classes=None):
  """ Return tensor to read from TFRecord """
  filename_queue = tf.train.string_input_producer([filename])
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(serialized_example,
                                     features={
                                       'label': tf.FixedLenFeature([],
                                                                   tf.int64),
                                       'image_raw': tf.FixedLenFeature([],
                                                                       tf.string),
                                     })
  # You can do more image distortion here for training data
  img = tf.decode_raw(features['image_raw'], tf.uint8)
  img.set_shape([28 * 28])
  img = tf.reshape(img, [28, 28, 1])
  img = tf.cast(img, tf.float32) * (1. / 255) - 0.5

  label = tf.cast(features['label'], tf.int32)
  if one_hot and n_classes:
    label = tf.one_hot(label, n_classes)

  return img, label
Project: facescore    Author: nanpian    | project source | file source
def parse_example_proto(example_serialized):
  # Dense features in Example proto.
  feature_map = {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
                                          default_value=''),
      'image/filename': tf.FixedLenFeature([], dtype=tf.string,
                                          default_value=''),

      'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
                                              default_value=-1),
      'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
                                             default_value=''),
      'image/height': tf.FixedLenFeature([1], dtype=tf.int64,
                                         default_value=-1),
      'image/width': tf.FixedLenFeature([1], dtype=tf.int64,
                                         default_value=-1),

  }

  features = tf.parse_single_example(example_serialized, feature_map)
  label = tf.cast(features['image/class/label'], dtype=tf.int32)
  return features['image/encoded'], label, features['image/filename']
Project: IllustrationGAN    Author: tdrussell    | project source | file source
def read_and_decode2(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'file_bytes': tf.FixedLenFeature([], tf.string),
        })

    # decode the png image
    image = tf.image.decode_png(features['file_bytes'], channels=3)

    # Convert to float image
    image = tf.cast(image, tf.float32)

    image.set_shape((IMAGE_SIZE, IMAGE_SIZE, CHANNELS))

    # convert to grayscale if needed
    if CHANNELS == 1:
        image = tf.reduce_mean(image, reduction_indices=[2], keep_dims=True)

    # normalize
    image = image * (2. / 255) - 1

    return image
Project: tf_oreilly    Author: chiphuyen    | project source | file source
def read_from_tfrecord(filenames):
    tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
    reader = tf.TFRecordReader()
    _, tfrecord_serialized = reader.read(tfrecord_file_queue)

    # label and image are stored as bytes but could be stored as 
    # int64 or float64 values in a serialized tf.Example protobuf.
    tfrecord_features = tf.parse_single_example(tfrecord_serialized,
                        features={
                            'label': tf.FixedLenFeature([], tf.int64),
                            'shape': tf.FixedLenFeature([], tf.string),
                            'image': tf.FixedLenFeature([], tf.string),
                        }, name='features')
    # image was saved as uint8, so we have to decode as uint8.
    image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
    shape = tf.decode_raw(tfrecord_features['shape'], tf.int32)
    # the image tensor is flattened out, so we have to reconstruct the shape
    image = tf.reshape(image, shape)
    label = tfrecord_features['label']
    return label, shape, image
Project: tensorflow-DDT    Author: wangchao66    | project source | file source
def read_and_decode(filename):
    filename_queue = tf.train.string_input_producer([filename])

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw': tf.FixedLenFeature([], tf.string),
                                       })

    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [28, 28, 3])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)

    return img, label
Project: Stock-Predict-RNN    Author: daiab    | project source | file source
def read_and_decode(record_file):
    print(record_file)
    # read_and_decode_test(record_file)
    data_queue = tf.train.input_producer([record_file], capacity=1e5, name="string_input_producer")
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(data_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={'label': tf.FixedLenFeature([], tf.int64),
                  'target': tf.FixedLenFeature([], tf.float32),
                  'data': tf.FixedLenFeature([cfg.time_step * 4], tf.float32)})
    data_raw = features['data']
    label = features['label']
    target = features['target']
    data = tf.reshape(data_raw, [cfg.time_step, 4])
    data.set_shape([cfg.time_step, 4])
    if cfg.is_training:
        data_batch, label_batch, target_batch = tf.train.batch([data, label, target],
                                                     batch_size=cfg.batch_size,
                                                     capacity=cfg.batch_size * 50,
                                                     num_threads=4)
        return data_batch, label_batch, target_batch
    else:
        return tf.expand_dims(data, 0), tf.expand_dims(label, 0), tf.expand_dims(target, 0)