Python tensorflow module: string_to_number() example source code

The following 13 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.string_to_number().
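
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic call: tf.string_to_number parses a string tensor element-wise into a numeric tensor of the requested out_type (tf.float32 by default). In TensorFlow 2.x the same op is also exposed as tf.strings.to_number.

# Minimal usage sketch (not from the projects below); assumes TensorFlow 1.x,
# where tf.string_to_number and tf.Session are available.
import tensorflow as tf

strings = tf.constant(["1", "2", "42"])
# Parse each string element into an int32; out_type defaults to tf.float32.
numbers = tf.string_to_number(strings, out_type=tf.int32)

with tf.Session() as sess:
    print(sess.run(numbers))  # => [ 1  2 42]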

Project: deep-time-reading    Author: felixduvallet
def read_image_and_label(image_label_q):
    # Returns three Tensors: the decoded PNG image, the hour, and the minute.
    filename, hour_str, minute_str = tf.decode_csv(
        image_label_q.dequeue(), [[""], [""], [""]], " ")
    file_contents = tf.read_file(filename)

    # Decode image from PNG, and cast it to a float.
    example = tf.image.decode_png(file_contents, channels=image_channels)
    image = tf.cast(example, tf.float32)

    # Set the tensor size manually from the image.
    image.set_shape([image_size, image_size, image_channels])

    # Do per-image whitening (zero mean, unit standard deviation). Without this,
    # the learning algorithm diverges almost immediately because the gradient is
    # too big.
    image = tf.image.per_image_whitening(image)

    # The label should be an integer.
    hour = tf.string_to_number(hour_str, out_type=tf.int32)
    minute = tf.string_to_number(minute_str, out_type=tf.int32)

    return image, hour, minute
Project: self-supervision    Author: gustavla
def _imagenet_load_file(path, epochs=None, shuffle=True, seed=0, subset='train', prepare_path=True):
    IMAGENET_ROOT = os.environ.get('IMAGENET_DIR', '')
    if not isinstance(path, list):
        path = [path]
    filename_queue = tf.train.string_input_producer(path,
            num_epochs=epochs, shuffle=shuffle, seed=seed)

    reader = tf.TextLineReader()
    key, value = reader.read(filename_queue)
    image_path, label_str = tf.decode_csv(value, record_defaults=[[''], ['']], field_delim=' ')

    if prepare_path:
        image_abspath = IMAGENET_ROOT + '/images/' + subset + image_path
    else:
        image_abspath = image_path

    image_content = tf.read_file(image_abspath)
    image = decode_image(image_content, channels=3)
    image.set_shape([None, None, 3])

    imgshape = tf.shape(image)[:2]
    label = tf.string_to_number(label_str, out_type=tf.int32)

    return image, label, imgshape, image_path
Project: provectus-final-project    Author: eds-uga
def input_fn(batch_size,file_name):
    """
    Input function that creates feature and label dicts for cross-validation.
    :param batch_size:
    :param file_name:
    :return: feature dict and label tensor
    """
    examples_op = tf.contrib.learn.read_batch_examples(
        file_name,
        batch_size=batch_size,
        reader=tf.TextLineReader,
        num_threads=5,
        num_epochs=1,
        randomize_input=False,
        parse_fn=lambda x: tf.decode_csv(x, [tf.constant([''], dtype=tf.string)] * len(COLUMNS), field_delim=","))

    examples_dict = {}

    for i, header in enumerate(COLUMNS):
        examples_dict[header] = examples_op[:,i]


    feature_cols = {k: tf.string_to_number(examples_dict[k], out_type=tf.float32)
                    for k in CONTINUOUS_COLUMNS}

    feature_cols.update({k: dense_to_sparse(examples_dict[k])
                         for k in CATEGORICAL_COLUMNS})

    label = tf.string_to_number(examples_dict[LABEL_COLUMN], out_type=tf.int32)

    return feature_cols, label
Project: BinaryNet.tf    Author: itayhubara
def __read_imagenet(path, shuffle=True, save_file = 'imagenet_files.csv'):
    if not os.path.exists(save_file):
        def class_index(fn):
            class_id = re.search(r'(n\d+)', fn).group(1)
            return synset_map[class_id]['index']

        file_list = glob.glob(path+'/*/*.JPEG')
        label_indexes = []
        with open(save_file, 'w', newline='') as csv_file:  # text mode for the csv module (Python 3)
            wr = csv.writer(csv_file, quoting=csv.QUOTE_NONE)
            for f in file_list:
                idx = class_index(f)
                label_indexes.append(idx)
                wr.writerow([f, idx])

    with open(save_file, 'r', newline='') as f:
        reader = csv.reader(f)
        file_list = list(reader)
    file_tuple, label_tuple = zip(*file_list)

    filename, labels = tf.train.slice_input_producer([list(file_tuple), list(label_tuple)], shuffle=shuffle)
    images = tf.image.decode_jpeg(tf.read_file(filename), channels=3)
    images = tf.div(tf.add(tf.to_float(images), -127), 128)
    return images, tf.string_to_number(labels, tf.int32)
Project: tfutils    Author: neuroailab
def test_postprocess():
    """
    This test uses the data in tftestdata2/ to illustrate how to read out
    something that has been written as a string but is "really" an integer.
    The data in tftestdata2/ids is just a single attribute, namely "ids",
    written out as a string but actually it really represents integers.
    """
    source_paths = [os.path.join(dir_path, 'tftestdata2/ids')]
    postprocess = {'ids': [(tf.string_to_number, (tf.int32, ), {})]}
    dp = d.TFRecordsParallelByFileProvider(source_paths,
                                           n_threads=1,
                                           batch_size=20,
                                           shuffle=False,
                                           postprocess=postprocess)
    sess = tf.Session()
    ops = dp.init_ops()
    queue = b.get_queue(ops[0], queue_type='fifo')
    enqueue_ops = []
    for op in ops:
        enqueue_ops.append(queue.enqueue_many(op))
    tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(queue, enqueue_ops))
    tf.train.start_queue_runners(sess=sess)
    K = 20
    inputs = queue.dequeue_many(K)
    N = 100
    testlist = np.arange(K * N) % 160
    for i in range(N):
        print('%d of %d' % (i, N))
        res = sess.run(inputs)
        assert_equal(res['ids'], testlist[K * i: K * (i+1)])
Project: attention    Author: louishenrifranc
def get_input_fn(batch_size, num_epochs, context_filename, answer_filename, max_sequence_len):
    def input_fn():
        source_dataset = tf.contrib.data.TextLineDataset(context_filename)
        target_dataset = tf.contrib.data.TextLineDataset(answer_filename)

        def map_dataset(dataset):
            dataset = dataset.map(lambda string: tf.string_split([string]).values)
            dataset = dataset.map(lambda token: tf.string_to_number(token, tf.int64))
            dataset = dataset.map(lambda tokens: (tokens, tf.size(tokens)))
            dataset = dataset.map(lambda tokens, size: (tokens[:max_sequence_len], tf.minimum(size, max_sequence_len)))
            return dataset

        source_dataset = map_dataset(source_dataset)
        target_dataset = map_dataset(target_dataset)

        dataset = tf.contrib.data.Dataset.zip((source_dataset, target_dataset))
        dataset = dataset.repeat(num_epochs)
        dataset = dataset.padded_batch(batch_size,
                                       padded_shapes=((tf.TensorShape([max_sequence_len]), tf.TensorShape([])),
                                                      (tf.TensorShape([max_sequence_len]), tf.TensorShape([]))
                                                      ))

        iterator = dataset.make_one_shot_iterator()
        next_element = iterator.get_next()
        return next_element, None

    return input_fn
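
The map_dataset helper above splits each text line into whitespace-delimited tokens with tf.string_split and converts them to integer ids with tf.string_to_number. Below is a stripped-down sketch of just that split-then-convert step, outside the Dataset pipeline (assuming TensorFlow 1.x graph mode; the literal line is only illustrative):

# Hedged sketch of the split-then-convert step used by map_dataset above.
import tensorflow as tf

line = tf.constant(["12 7 305 9"])           # one whitespace-delimited line of token ids (made-up values)
tokens = tf.string_split(line).values        # string tensor ["12", "7", "305", "9"]
ids = tf.string_to_number(tokens, tf.int64)  # int64 tensor [12, 7, 305, 9]

with tf.Session() as sess:
    print(sess.run(ids))  # => [ 12   7 305   9]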
Project: provectus-final-project    Author: eds-uga
def input_fn(batch_size,file_name):
    """
    :param batch_size:
    :param file_name:
    :return: features and label dict
    """
    examples_op = tf.contrib.learn.read_batch_examples(
        file_name,
        batch_size=batch_size,
        reader=tf.TextLineReader,
        num_epochs=1,
        parse_fn=lambda x: tf.decode_csv(x, [tf.constant([''], dtype=tf.string)] * len(COLUMNS), field_delim=","))

    examples_dict = {}

    for i, header in enumerate(COLUMNS):
        examples_dict[header] = examples_op[:,i]

    feature_cols = {k: tf.string_to_number(examples_dict[k], out_type=tf.float32)
                    for k in CONTINUOUS_COLUMNS}

    feature_cols.update({k: dense_to_sparse(examples_dict[k])
                         for k in CATEGORICAL_COLUMNS})

    label = tf.string_to_number(examples_dict[LABEL_COLUMN], out_type=tf.int32)

    return feature_cols, label
Project: provectus-final-project    Author: eds-uga
def input_fn_eval(batch_size,file_name):
    """
     Input function to predict the test features
    :param batch_size:
    :param file_name:
    :return: features and label dict
    """
    examples_op = tf.contrib.learn.read_batch_examples(
        file_name,
        batch_size=batch_size,
        reader=tf.TextLineReader,
        randomize_input=False,
        read_batch_size=1,
        num_threads=5,
        num_epochs=1,
        parse_fn=lambda x: tf.decode_csv(x, [tf.constant([''], dtype=tf.string)] * len(COLUMNS), field_delim=","))
    examples_dict = {}

    for i, header in enumerate(COLUMNS):
        examples_dict[header] = examples_op[:,i]


    feature_cols = {k: tf.string_to_number(examples_dict[k], out_type=tf.float32)
                    for k in CONTINUOUS_COLUMNS}

    feature_cols.update({k: dense_to_sparse(examples_dict[k])
                         for k in CATEGORICAL_COLUMNS})


    return feature_cols
Project: TF_DD2427Proj    Author: maxlotz
def load_batches(image_filenames=None,
                 label_filenames=None,
                 shape=(64, 64, 3),
                 batch_size=100):

    min_after_dequeue = 1000
    capacity = min_after_dequeue + 3 * batch_size  # As recommended on the TF website

    with tf.name_scope('input'):
        image_queue, label_queue = tf.train.slice_input_producer(
                                            [image_filenames, label_filenames],
                                            shuffle=True)

        # File reader and decoder for images and labels go here
        with tf.name_scope('image'): 

            with tf.name_scope('decode'): 
                file_content = tf.read_file(image_queue)
                image_data = tf.image.decode_jpeg(file_content, channels=3)

            # Any resizing or processing (e.g. normalising) goes here
            with tf.name_scope('normalize'): 
                image_data = tf.cast(image_data, tf.float32)
                image_data = tf.image.per_image_standardization(image_data)
                image_data.set_shape(shape)


        with tf.name_scope('label'): 

            with tf.name_scope('decode'): 
                label_data = tf.string_to_number(label_queue, out_type=tf.int32)

        image_batch, label_batch = tf.train.shuffle_batch(
                                            [image_data, label_data],
                                            batch_size=batch_size,
                                            capacity=capacity,
                                            min_after_dequeue=min_after_dequeue,
                                            #,num_threads=1
                                            )

    return image_batch, label_batch
Project: windbag    Author: tongda
def _read_id_file(path) -> Dataset:
  def _parse_line(line):
    splits = tf.string_split(tf.reshape(line, (-1,))).values
    return tf.string_to_number(splits, out_type=tf.int32)

  return TextLineDataset(path) \
    .filter(lambda line: tf.size(line) > 0) \
    .map(_parse_line)
Project: tfdeploy    Author: riga
def test_StringToNumber(self):
        t = tf.string_to_number(list("0123456789"))
        self.check(t)


    #
    # shapes and shaping
    #
Project: hsr    Author: pyk
def read_images(data_dir):
    pattern = os.path.join(data_dir, '*.png')
    filenames = tf.train.match_filenames_once(pattern, name='list_files')

    queue = tf.train.string_input_producer(
        filenames, 
        num_epochs=NUM_EPOCHS, 
        shuffle=True, 
        name='queue')

    reader = tf.WholeFileReader()
    filename, content = reader.read(queue, name='read_image')
    filename = tf.Print(
        filename, 
        data=[filename],
        message='loading: ')
    filename_split = tf.string_split([filename], delimiter='/')
    label_id = tf.string_to_number(tf.substr(filename_split.values[1], 
        0, 1), out_type=tf.int32)
    label = tf.one_hot(
        label_id-1, 
        5, 
        on_value=1.0, 
        off_value=0.0, 
        dtype=tf.float32)

    img_tensor = tf.image.decode_png(
        content, 
        dtype=tf.uint8, 
        channels=3,
        name='img_decode')

    # Preprocess the image, Performs random transformations
    # Random flip
    img_tensor_flip = tf.image.random_flip_left_right(img_tensor)

    # Random brightness
    img_tensor_bri = tf.image.random_brightness(img_tensor_flip, 
        max_delta=0.2)

    # Per-image scaling
    img_tensor_std = tf.image.per_image_standardization(img_tensor_bri)

    min_after_dequeue = 1000
    capacity = min_after_dequeue + 3 * BATCH_SIZE
    example_batch, label_batch = tf.train.shuffle_batch(
        [img_tensor_std, label], 
        batch_size=BATCH_SIZE,
        shapes=[(IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS), (NUM_CLASS,)],
        capacity=capacity, 
        min_after_dequeue=min_after_dequeue,
        name='train_shuffle')

    return example_batch, label_batch

# `images` is a 4-D tensor with the shape:
# [n_batch, img_height, img_width, n_channel]
Project: pydatalab    Author: googledatalab
def _create_model(self, dir_name):
    """Create a simple model that takes 'key', 'num1', 'text1', 'img_url1' input."""

    def _decode_jpg(image):
      img_buf = BytesIO()
      Image.new('RGB', (16, 16)).save(img_buf, 'jpeg')
      default_image_string = base64.urlsafe_b64encode(img_buf.getvalue())
      image = tf.where(tf.equal(image, ''), default_image_string, image)
      image = tf.decode_base64(image)
      image = tf.image.decode_jpeg(image, channels=3)
      image = tf.reshape(image, [-1])
      image = tf.reduce_max(image)
      return image

    model_dir = tempfile.mkdtemp()
    with tf.Session(graph=tf.Graph()) as sess:
      record_defaults = [
          tf.constant([0], dtype=tf.int64),
          tf.constant([0.0], dtype=tf.float32),
          tf.constant([''], dtype=tf.string),
          tf.constant([''], dtype=tf.string),
      ]
      placeholder = tf.placeholder(dtype=tf.string, shape=(None,), name='csv_input_placeholder')
      key_tensor, num_tensor, text_tensor, img_tensor = tf.decode_csv(placeholder, record_defaults)
      text_tensor = tf.string_to_number(text_tensor, tf.float32)
      img_tensor = tf.map_fn(_decode_jpg, img_tensor, back_prop=False, dtype=tf.uint8)
      img_tensor = tf.cast(img_tensor, tf.float32)
      stacked = tf.stack([num_tensor, text_tensor, img_tensor])
      min_tensor = tf.reduce_min(stacked, axis=0)
      max_tensor = tf.reduce_max(stacked, axis=0)

      predict_input_tensor = tf.saved_model.utils.build_tensor_info(placeholder)
      predict_signature_inputs = {"input": predict_input_tensor}
      predict_output_tensor1 = tf.saved_model.utils.build_tensor_info(min_tensor)
      predict_output_tensor2 = tf.saved_model.utils.build_tensor_info(max_tensor)
      predict_key_tensor = tf.saved_model.utils.build_tensor_info(key_tensor)
      predict_signature_outputs = {
        'key': predict_key_tensor,
        'var1': predict_output_tensor1,
        'var2': predict_output_tensor2
      }
      predict_signature_def = (
          tf.saved_model.signature_def_utils.build_signature_def(
              predict_signature_inputs, predict_signature_outputs,
              tf.saved_model.signature_constants.PREDICT_METHOD_NAME
          )
      )
      signature_def_map = {
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: predict_signature_def
      }
      model_dir = os.path.join(self._test_dir, dir_name)
      builder = tf.saved_model.builder.SavedModelBuilder(model_dir)
      builder.add_meta_graph_and_variables(
          sess, [tf.saved_model.tag_constants.SERVING],
          signature_def_map=signature_def_map)
      builder.save(False)

    return model_dir