Python tensorflow module: reduce_join() code examples

We extracted the following 8 code examples from open-source Python projects to illustrate how to use tensorflow.reduce_join().
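Before the project examples, here is a minimal sketch of what tf.reduce_join does, written against the TF 1.x API that all of the snippets below use: it concatenates the elements of a string tensor along an axis, optionally inserting a separator.

import tensorflow as tf

words = tf.constant([["hello", "world"],
                     ["good", "bye"]])

# Join along axis 1: one string per row.
rows = tf.reduce_join(words, axis=1, separator=" ")

# No axis: reduce over all dimensions down to a single scalar string.
everything = tf.reduce_join(words, separator=" ")

with tf.Session() as sess:
    print(sess.run(rows))        # [b'hello world' b'good bye']
    print(sess.run(everything))  # b'hello world good bye'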

Project: tensorboard    Author: tensorflow    | project source | file source
def markdown_table(step):
  # The text summary can also contain Markdown, including Markdown
  # tables. Markdown tables look like this:
  #
  #     | hello | there |
  #     |-------|-------|
  #     | this  | is    |
  #     | a     | table |
  #
  # The leading and trailing pipes in each row are optional, and the text
  # doesn't actually have to be neatly aligned, so we can create these
  # pretty easily. Let's do so.
  header_row = 'Pounds of chocolate | Happiness'
  chocolate = tf.range(step)
  happiness = tf.square(chocolate + 1)
  chocolate_column = tf.as_string(chocolate)
  happiness_column = tf.as_string(happiness)
  table_rows = tf.string_join([chocolate_column, " | ", happiness_column])
  table_body = tf.reduce_join(table_rows, separator='\n')
  table = tf.string_join([header_row, "---|---", table_body], separator='\n')
  preamble = 'We conducted an experiment and found the following data:\n\n'
  result = tf.string_join([preamble, table])
  tf.summary.text('chocolate_study', result)
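For step = 3, for instance, chocolate is [0, 1, 2] and happiness is [1, 4, 9], so the joined summary renders as:

We conducted an experiment and found the following data:

Pounds of chocolate | Happiness
---|---
0 | 1
1 | 4
2 | 9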
Project: seq2seq    Author: google    | project source | file source
def create_metric_ops(self, _inputs, labels, predictions):
    """Creates (value, update_op) tensors
    """
    with tf.variable_scope(self._name):

      # Join tokens into single strings
      predictions_flat = tf.reduce_join(
          predictions["predicted_tokens"], 1, separator=self._separator)
      labels_flat = tf.reduce_join(
          labels["target_tokens"], 1, separator=self._separator)

      sources_value, sources_update = accumulate_strings(
          values=predictions_flat, name="sources")
      targets_value, targets_update = accumulate_strings(
          values=labels_flat, name="targets")

      metric_value = tf.py_func(
          func=self._py_func,
          inp=[sources_value, targets_value],
          Tout=tf.float32,
          name="value")

    with tf.control_dependencies([sources_update, targets_update]):
      update_op = tf.identity(metric_value, name="update_op")

    return metric_value, update_op
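The pattern to notice here: tf.reduce_join along axis 1 collapses a [batch, time] matrix of tokens into a [batch] vector of strings, one per example, which can then be handed to a Python-side metric through tf.py_func. A minimal sketch of just that step (the tokens below are invented for illustration):

import tensorflow as tf

# Hypothetical [batch=2, time=3] matrix of decoded tokens.
tokens = tf.constant([["the", "cat", "sat"],
                      ["a", "dog", "ran"]])
sentences = tf.reduce_join(tokens, 1, separator=" ")

with tf.Session() as sess:
    print(sess.run(sentences))  # [b'the cat sat' b'a dog ran']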
Project: tf-crnn    Author: solivr    | project source | file source
def get_words_from_chars(characters_list: List[str], sequence_lengths: List[int], name='chars_conversion'):
    with tf.name_scope(name=name):
        def join_characters_fn(coords):
            return tf.reduce_join(characters_list[coords[0]:coords[1]])

        def coords_several_sequences():
            end_coords = tf.cumsum(sequence_lengths)
            start_coords = tf.concat([[0], end_coords[:-1]], axis=0)
            coords = tf.stack([start_coords, end_coords], axis=1)
            coords = tf.cast(coords, dtype=tf.int32)
            return tf.map_fn(join_characters_fn, coords, dtype=tf.string)

        def coords_single_sequence():
            return tf.reduce_join(characters_list, keep_dims=True)

        words = tf.cond(tf.shape(sequence_lengths)[0] > 1,
                        true_fn=coords_several_sequences,
                        false_fn=coords_single_sequence)

        return words
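A hedged usage sketch for get_words_from_chars, with an invented flat character tensor covering two words of lengths 3 and 2 (despite the List annotations, the function slices with tensor indices, so string tensors are what it actually expects):

import tensorflow as tf

chars = tf.constant(["c", "a", "t", "o", "k"])  # "cat" + "ok", flattened
lengths = tf.constant([3, 2])
words = get_words_from_chars(chars, lengths)

with tf.Session() as sess:
    print(sess.run(words))  # [b'cat' b'ok']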
Project: conv_seq2seq    Author: tobyyouup    | project source | file source
def create_metric_ops(self, _inputs, labels, predictions):
    """Creates (value, update_op) tensors
    """
    with tf.variable_scope(self._name):

      # Join tokens into single strings
      predictions_flat = tf.reduce_join(
          predictions["predicted_tokens"], 1, separator=self._separator)
      labels_flat = tf.reduce_join(
          labels["target_tokens"], 1, separator=self._separator)

      sources_value, sources_update = accumulate_strings(
          values=predictions_flat, name="sources")
      targets_value, targets_update = accumulate_strings(
          values=labels_flat, name="targets")

      metric_value = tf.py_func(
          func=self._py_func,
          inp=[sources_value, targets_value],
          Tout=tf.float32,
          name="value")

    with tf.control_dependencies([sources_update, targets_update]):
      update_op = tf.identity(metric_value, name="update_op")

    return metric_value, update_op
Project: tefla    Author: openAGI    | project source | file source
def create_metric_ops(self, _inputs, labels, predictions):
        """Creates (value, update_op) tensors
        """
        with tf.variable_scope(self._name):

            # Join tokens into single strings
            predictions_flat = tf.reduce_join(
                predictions["predicted_tokens"], 1, separator=self._separator)
            labels_flat = tf.reduce_join(
                labels["target_tokens"], 1, separator=self._separator)

            sources_value, sources_update = accumulate_strings(
                values=predictions_flat, name="sources")
            targets_value, targets_update = accumulate_strings(
                values=labels_flat, name="targets")

            metric_value = tf.py_func(
                func=self._py_func,
                inp=[sources_value, targets_value],
                Tout=tf.float32,
                name="value")

        with tf.control_dependencies([sources_update, targets_update]):
            update_op = tf.identity(metric_value, name="update_op")

        return metric_value, update_op
Project: automatic-summarization    Author: mozilla    | project source | file source
def create_metric_ops(self, _inputs, labels, predictions):
    """Creates (value, update_op) tensors
    """
    with tf.variable_scope(self._name):

      # Join tokens into single strings
      predictions_flat = tf.reduce_join(
          predictions["predicted_tokens"], 1, separator=self._separator)
      labels_flat = tf.reduce_join(
          labels["target_tokens"], 1, separator=self._separator)

      sources_value, sources_update = accumulate_strings(
          values=predictions_flat, name="sources")
      targets_value, targets_update = accumulate_strings(
          values=labels_flat, name="targets")

      metric_value = tf.py_func(
          func=self._py_func,
          inp=[sources_value, targets_value],
          Tout=tf.float32,
          name="value")

    with tf.control_dependencies([sources_update, targets_update]):
      update_op = tf.identity(metric_value, name="update_op")

    return metric_value, update_op
Project: tf_face    Author: wwoo    | project source | file source
def read_image_from_disk(input_queue):
    label = input_queue[1]
    # reduce_join along axis 0 concatenates the base path and the
    # relative file name into a single scalar string.
    file_name = tf.reduce_join([TENSOR_BASE_PATH, input_queue[0]], 0)
    file_contents = tf.read_file(file_name)
    rgb_image = tf.image.decode_jpeg(file_contents, channels=FLAGS.image_channels,
        name="decode_jpeg")
    rgb_image = tf.image.convert_image_dtype(rgb_image, dtype=tf.float32,
        name="image_convert_float32")
    rgb_image = tf.image.resize_images(rgb_image,
        [FLAGS.image_size, FLAGS.image_size])

    return rgb_image, label
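Here tf.reduce_join serves as plain string concatenation: joining the rank-1 pair [TENSOR_BASE_PATH, input_queue[0]] along axis 0 yields one scalar path, equivalent to tf.string_join([TENSOR_BASE_PATH, input_queue[0]]).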
Project: NumberCamera    Author: KarlXiao    | project source | file source
def evaluate(self, path_to_checkpoint, path_to_tfrecords_file, num_examples, global_step):
        batch_size = 128
        num_batches = num_examples // batch_size
        needs_include_length = False

        with tf.Graph().as_default():
            image_batch, length_batch, digits_batch = Donkey.build_batch(path_to_tfrecords_file,
                                                                         num_examples=num_examples,
                                                                         batch_size=batch_size,
                                                                         shuffled=False)
            # length_logits, digits_logits = Model.inference(image_batch, drop_rate=0.0)
            length_logits, digits_logits = Model.forward(image_batch, 1.0)
            length_predictions = tf.argmax(length_logits, axis=1)
            digits_predictions = tf.argmax(digits_logits, axis=2)

            if needs_include_length:
                labels = tf.concat([tf.reshape(length_batch, [-1, 1]), digits_batch], axis=1)
                predictions = tf.concat([tf.reshape(length_predictions, [-1, 1]), digits_predictions], axis=1)
            else:
                labels = digits_batch
                predictions = digits_predictions

            # Join each row's digits into one string per example, so that
            # tf.metrics.accuracy compares whole digit sequences at once.
            labels_string = tf.reduce_join(tf.as_string(labels), axis=1)
            predictions_string = tf.reduce_join(tf.as_string(predictions), axis=1)

            accuracy, update_accuracy = tf.metrics.accuracy(
                labels=labels_string,
                predictions=predictions_string
            )

            tf.summary.image('image', image_batch)
            tf.summary.scalar('accuracy', accuracy)
            tf.summary.histogram('variables',
                                 tf.concat([tf.reshape(var, [-1]) for var in tf.trainable_variables()], axis=0))
            summary = tf.summary.merge_all()

            with tf.Session() as sess:
                sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)

                restorer = tf.train.Saver()
                restorer.restore(sess, path_to_checkpoint)

                for _ in range(int(num_batches)):
                    sess.run(update_accuracy)

                accuracy_val, summary_val = sess.run([accuracy, summary])
                self.summary_writer.add_summary(summary_val, global_step=global_step)

                coord.request_stop()
                coord.join(threads)

        return accuracy_val
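The string trick that makes this whole-sequence accuracy work, in isolation (the digit values are invented): tf.as_string converts each digit to a string, and reduce_join along axis 1 fuses each row into one comparable scalar, so an example counts as correct only when every digit matches.

import tensorflow as tf

# Hypothetical [batch=2, digits=3] matrix of predicted digits.
digits = tf.constant([[1, 2, 3],
                      [4, 0, 7]])
joined = tf.reduce_join(tf.as_string(digits), 1)

with tf.Session() as sess:
    print(sess.run(joined))  # [b'123' b'407']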