Python tensorflow module: local_variables_initializer() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.local_variables_initializer().
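All of the snippets below share the same pattern, so here is a minimal standalone sketch for orientation (not taken from any of the listed projects; it assumes the TensorFlow 1.x graph-and-session API): streaming metrics and input-queue epoch counters are stored in local variables, which tf.global_variables_initializer() does not touch, so tf.local_variables_initializer() must be run before the first update.

import tensorflow as tf

# tf.metrics.mean keeps its running total and count in *local* variables,
# which tf.global_variables_initializer() does not initialize.
values = tf.placeholder(dtype=tf.float32)
mean, update_op = tf.metrics.mean(values)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # initialize the metric's counters
    for batch in ([0.0, 1.0], [2.0, 3.0]):
        sess.run(update_op, feed_dict={values: batch})
    print(sess.run(mean))  # 1.5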

Project: seq2seq    Author: google
def _test_metric_spec(self, metric_spec, hyps, refs, expected_scores):
    """Tests a MetricSpec"""
    predictions = {"predicted_tokens": tf.placeholder(dtype=tf.string)}
    labels = {"target_tokens": tf.placeholder(dtype=tf.string)}

    value, update_op = metric_spec.create_metric_ops(None, labels, predictions)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())

      scores = []
      for hyp, ref in zip(hyps, refs):
        hyp = hyp.split(" ")
        ref = ref.split(" ")
        sess.run(update_op, {
            predictions["predicted_tokens"]: [hyp],
            labels["target_tokens"]: [ref]
        })
        scores.append(sess.run(value))

      for score, expected in zip(scores, expected_scores):
        np.testing.assert_almost_equal(score, expected, decimal=2)
Project: How-to-Learn-from-Little-Data    Author: llSourcell
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print('Hello')"""

    def update_tensor(V, dim2, val):  # Update tensor V at indices (:, dim2[:]) with val[:]
        val = tf.cast(val, V.dtype)
        def body(_, args):
            v, d2, chg = args
            d2_int = tf.cast(d2, tf.int32)
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                            [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val),
                    initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32),
                    name="Scan_Update")
        return Z
Project: seq2seq    Author: google
def test_reading_without_targets(self):
    num_epochs = 50
    data_provider = make_parallel_data_provider(
        data_sources_source=[self.source_file.name],
        data_sources_target=None,
        num_epochs=num_epochs,
        shuffle=True)

    item_keys = list(data_provider.list_items())
    item_values = data_provider.get(item_keys)
    items_dict = dict(zip(item_keys, item_values))

    self.assertEqual(set(item_keys), set(["source_tokens", "source_len"]))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      with tf.contrib.slim.queues.QueueRunners(sess):
        item_dicts_ = [sess.run(items_dict) for _ in range(num_epochs * 3)]

    for item_dict in item_dicts_:
      self.assertEqual(item_dict["source_len"], 2)
      item_dict["source_tokens"] = np.char.decode(
          item_dict["source_tokens"].astype("S"), "utf-8")
      self.assertEqual(item_dict["source_tokens"][-1], "SEQUENCE_END")
Project: kaggle-review    Author: daxiongshu
def predictPL(self):
        B = self.flags.batch_size
        W, H, C = self.flags.width, self.flags.height, self.flags.color
        inputs = tf.placeholder(dtype=tf.float32, shape=[None, H, W, C])

        #with open(self.flags.pred_path,'w') as f:
        #    pass

        self._build(inputs,resize=False)
        counter = 0
        with tf.Session() as sess:
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            for imgs, imgnames in self.DATA.test_generator():
                pred = sess.run(self.logit, feed_dict={inputs: imgs})
                np.save("%s/%d.npy" % (self.flags.pred_path, counter),
                        {"pred": pred, "name": imgnames})
                counter += len(imgs)
                if counter // B % 10 == 0:
                    print_mem_time("%d images predicted" % counter)

    # train with placeholders
Project: kaggle-review    Author: daxiongshu
def predict_from_placeholder(self, activation=None):
        self._build()
        self._get_summary()
        if activation is not None:
            self.logit = self._activate(self.logit, activation)
        # Truncate the prediction output file.
        with open(self.flags.pred_path, 'w'):
            pass
        count = 0
        with tf.Session() as sess:
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            if self.flags.log_path and self.flags.visualize is not None:
                summary_writer = tf.summary.FileWriter(self.flags.log_path, sess.graph)
            for batch in self._batch_gen_test():
                x, _, epoch = batch
                if self.flags.log_path and self.flags.visualize is not None:
                    summary, pred = sess.run([self.summ_op, self.logit],
                                             feed_dict={self.inputs: x, self.is_training: 0})
                    summary_writer.add_summary(summary, count)
                else:
                    pred = sess.run(self.logit,
                                    feed_dict={self.inputs: x, self.is_training: 0})
                count += 1
                if count % self.flags.verbosity == 0:
                    print_mem_time("Epoch %d Batch %d " % (epoch, count))
                self.write_pred(pred)
Project: NTM-One-Shot-TF    Author: hmishra2250
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print('Hello')"""

    def update_tensor(V, dim2, val):  # Update tensor V at indices (:, dim2[:]) with val[:]
        val = tf.cast(val, V.dtype)
        def body(_, args):
            v, d2, chg = args
            d2_int = tf.cast(d2, tf.int32)
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                            [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val),
                    initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32),
                    name="Scan_Update")
        return Z
Project: unsupervised-2017-cvprw    Author: imatge-upc
def input_pipeline(filenames, batch_size, read_threads=4, num_epochs=None, is_training=True):
    filename_queue = tf.train.string_input_producer(
                        filenames, num_epochs=num_epochs, shuffle=is_training)
    # When num_epochs is not None, the epoch counter is a local variable and must be
    # initialized, or TensorFlow raises an uninitialized-variable error.
    tf.get_default_session().run(tf.local_variables_initializer())

    example_list = [read_my_file_format(filename_queue, is_training)
                    for _ in range(read_threads)]

    min_after_dequeue = 300 if is_training else 10
    capacity = min_after_dequeue + 3 * batch_size
    clip_batch, img_mask_batch, loss_mask_batch = tf.train.shuffle_batch_join(
        example_list, batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue)

    return clip_batch, img_mask_batch, loss_mask_batch
Project: unsupervised-2017-cvprw    Author: imatge-upc
def input_pipeline_dis(filenames, batch_size, read_threads=4, num_epochs=None, is_training=True):
    filename_queue = tf.train.string_input_producer(
                        filenames, num_epochs=num_epochs, shuffle=is_training)
    # When num_epochs is not None, the epoch counter is a local variable and must be
    # initialized, or TensorFlow raises an uninitialized-variable error.
    tf.get_default_session().run(tf.local_variables_initializer())

    example_list = [read_my_file_format_dis(filename_queue, is_training)
                    for _ in range(read_threads)]

    min_after_dequeue = 300 if is_training else 10
    capacity = min_after_dequeue + 3 * batch_size
    clip_batch, label_batch, text_batch = tf.train.shuffle_batch_join(
        example_list, batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue)

    return clip_batch, label_batch, text_batch
Project: TFExperiments    Author: gnperdue
def examine_batches(features_batch, targets_batch):
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            for it in range(5000):
                features, targets = sess.run([features_batch, targets_batch])
                if it % 100 == 0:
                    LOGGER.debug(it)
                    LOGGER.debug(
                        '%s %s %s',
                        len(features),
                        features[0].shape,
                        np.max(features[0][0][7][:])
                    )
                    LOGGER.debug(np.argmax(targets, axis=1))
        except tf.errors.OutOfRangeError:
            LOGGER.info('Training stopped - queue is empty.')
        except Exception as e:
            LOGGER.error(e)
        finally:
            coord.request_stop()
            coord.join(threads)
Project: tensorflow-action-conditional-video-prediction    Author: williamd4112
def main(args):
    with tf.Graph().as_default() as graph:
        # Create dataset
        logging.info('Create data flow from %s' % args.data)
        caffe_dataset = CaffeDataset(dir=args.data, num_act=args.num_act, mean_path=args.mean)

        # Config session
        config = get_config(args)

        x = tf.placeholder(dtype=tf.float32, shape=[None, 84, 84, 12])
        op = load_caffe_model(x, args.load)

        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

        # Start session
        with tf.Session(config=config) as sess:
            sess.run(init)
            i = 0
            for s, a in caffe_dataset(5):
                pred_data = sess.run([op], feed_dict={x: [s]})[0]
                print(pred_data.shape)
                np.save('tf-%03d.npy' % i, pred_data)
                i += 1
Project: tf-cnn-lstm-ocr-captcha    Author: Luonic
def evaluate():
    """Eval ocr for a number of steps."""
    with tf.Graph().as_default() as g:
        images, labels, seq_lengths = ocr.inputs()
        logits, timesteps = ocr.inference(images, FLAGS.eval_batch_size, train=True)
        ler = ocr.create_label_error_rate(logits, labels, timesteps)
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        config = tf.ConfigProto(
            device_count={'GPU': 0}
        )
        sess = tf.Session(config=config)
        sess.run(init_op)

        saver = tf.train.Saver()

        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        while True:
            eval_once(saver, summary_writer, ler, summary_op)
            if FLAGS.run_once:
                break
            # print("Waiting for next evaluation for " + str(FLAGS.eval_interval_secs) + " sec")
            time.sleep(FLAGS.eval_interval_secs)
Project: rl-attack-detection    Author: yenchenlin
def main(args):
    with tf.Graph().as_default() as graph:
        # Create dataset
        logging.info('Create data flow from %s' % args.data)
        caffe_dataset = CaffeDataset(dir=args.data, num_act=args.num_act, mean_path=args.mean)

        # Config session
        config = get_config(args)

        x = tf.placeholder(dtype=tf.float32, shape=[None, 84, 84, 12])
        op = load_caffe_model(x, args.load)

        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

        # Start session
        with tf.Session(config=config) as sess:
            sess.run(init)
            i = 0
            for s, a in caffe_dataset(5):
                pred_data = sess.run([op], feed_dict={x: [s]})[0]
                print(pred_data.shape)
                np.save('tf-%03d.npy' % i, pred_data)
                i += 1
Project: conv_seq2seq    Author: tobyyouup
def _test_metric_spec(self, metric_spec, hyps, refs, expected_scores):
    """Tests a MetricSpec"""
    predictions = {"predicted_tokens": tf.placeholder(dtype=tf.string)}
    labels = {"target_tokens": tf.placeholder(dtype=tf.string)}

    value, update_op = metric_spec.create_metric_ops(None, labels, predictions)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())

      scores = []
      for hyp, ref in zip(hyps, refs):
        hyp = hyp.split(" ")
        ref = ref.split(" ")
        sess.run(update_op, {
            predictions["predicted_tokens"]: [hyp],
            labels["target_tokens"]: [ref]
        })
        scores.append(sess.run(value))

      for score, expected in zip(scores, expected_scores):
        np.testing.assert_almost_equal(score, expected, decimal=2)
Project: conv_seq2seq    Author: tobyyouup
def test_reading_without_targets(self):
    num_epochs = 50
    data_provider = make_parallel_data_provider(
        data_sources_source=[self.source_file.name],
        data_sources_target=None,
        num_epochs=num_epochs,
        shuffle=True)

    item_keys = list(data_provider.list_items())
    item_values = data_provider.get(item_keys)
    items_dict = dict(zip(item_keys, item_values))

    self.assertEqual(set(item_keys), set(["source_tokens", "source_len"]))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      with tf.contrib.slim.queues.QueueRunners(sess):
        item_dicts_ = [sess.run(items_dict) for _ in range(num_epochs * 3)]

    for item_dict in item_dicts_:
      self.assertEqual(item_dict["source_len"], 2)
      item_dict["source_tokens"] = np.char.decode(
          item_dict["source_tokens"].astype("S"), "utf-8")
      self.assertEqual(item_dict["source_tokens"][-1], "SEQUENCE_END")
Project: satellite-image-object-detection    Author: marcbelmont
def test_batch(self):
        with self.test_session() as sess:
            df = pd.DataFrame(
                ['TQ2379_0_0_B  TQ2379_0_0.jpg      F  1776:520|1824:125'.split(),
                 'TQ2379_0_0_B  TQ2379_0_0.jpg      F  1776:500|1824:125'.split(),
                ],
                columns=['id', 'image', 'class', 'detections'])
            df = extract_crops_sw(df, 250, False, 250)
            batch = create_batch(df, False)

            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            images, labels = sess.run(batch)
            self.assertListEqual(list(labels[0, 2, 3, :5]),
                                 [1., 74., 125., 30., 30.])
            self.assertTrue(labels[0, 2, 3, 5 + 5])

            coord.request_stop()
            coord.join(threads)
Project: SpikeFlow    Author: deeperic
def inputs_test(filename, batch_size, num_epochs, num_threads,
           imshape, num_examples_per_epoch=128):

  # Note: this only builds the init op; it has no effect unless it is run in a session.
  tf.local_variables_initializer()

  if not num_epochs:
    num_epochs = None

  with tf.name_scope('input'):
    filename_queue = tf.train.string_input_producer(
      [filename], num_epochs=num_epochs, name='string_input_producer')

    image, label = reader.read_and_decode_wholefile(filename_queue, imshape, normalize=True)

    images, sparse_labels = tf.train.batch([image, label], batch_size=batch_size)

    return images, sparse_labels
Project: deep-RL-DQN-tensorflow    Author: ZidanMusk
def main():

    dqn = DQN(ENV_NAME, DOUBLE_DQN, DUELING_DQN, PER, TRAINING, RENDER)

    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

    with tf.Session() as sess:

        sess.run(init_op)
        # Try to restore a trained model and play.
        dqn.util.restore_graph(sess, forTrain=TRAINING)

        for ep in tqdm(range(MAX_EPISODES)):  # iterate over episodes

            print("Episode no. {}:".format(ep))

            dqn.playing(sess)

            print('Episode %d: totalEpReward = %.2f, took: %.3f mins' % (ep, dqn.totalReward, dqn.duration / 60.0))


#RUN...
Project: Super_TF    Author: Dhruv-Mohan
def main():
    # Create the reader first so that the initializer op covers any variables it creates.
    dummy_reader = Dataset_reader_classification(filename=_DATASET_PATH_, num_classes=_CLASSES_)
    #dummy_reader.pre_process_image(writer_pre_proc)
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

    with tf.Session() as sess:
        init_op.run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        images, labels = dummy_reader.next_batch(_BATCH_SIZE_)
        meanimage = sess.run([dummy_reader.mean_image])[0]
        print(meanimage)
        print(images[0])
        if _SHOW_IMAGES_:
            for image in images:
                cv2.imshow('Image', image)
                cv2.imshow('Meanimage', meanimage)
                cv2.waitKey(0)

        coord.request_stop()
        coord.join(threads)
Project: tefla    Author: openAGI
def _test_metric_spec(self, metric_spec, hyps, refs, expected_scores):
        """Tests a MetricSpec"""
        predictions = {"predicted_tokens": tf.placeholder(dtype=tf.string)}
        labels = {"target_tokens": tf.placeholder(dtype=tf.string)}

        value, update_op = metric_spec.create_metric_ops(
            None, labels, predictions)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())

            scores = []
            for hyp, ref in zip(hyps, refs):
                hyp = hyp.split(" ")
                ref = ref.split(" ")
                sess.run(update_op, {
                    predictions["predicted_tokens"]: [hyp],
                    labels["target_tokens"]: [ref]
                })
                scores.append(sess.run(value))

            for score, expected in zip(scores, expected_scores):
                self.assertNDArrayNear(score, expected, 0.01)
Project: lsdc    Author: febert
def testFinalOpsOnEvaluationLoop(self):
    value_op, update_op = slim.metrics.streaming_accuracy(
        self._predictions, self._labels)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    # Create Checkpoint and log directories
    chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
    gfile.MakeDirs(chkpt_dir)
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    gfile.MakeDirs(logdir)

    # Save initialized variables to checkpoint directory
    saver = tf.train.Saver()
    with self.test_session() as sess:
      init_op.run()
      saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

    # Now, run the evaluation loop:
    accuracy_value = slim.evaluation.evaluation_loop(
        '', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
        max_number_of_evaluations=1)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
Project: lsdc    Author: febert
def testRestoredModelPerformance(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')

    # First, save out the current model to a checkpoint:
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
    with self.test_session() as sess:
      sess.run(init_op)
      saver.save(sess, checkpoint_path)

    # Next, determine the metric to evaluate:
    value_op, update_op = slim.metrics.streaming_accuracy(
        self._predictions, self._labels)

    # Run the evaluation and verify the results:
    accuracy_value = slim.evaluation.evaluate_once(
        '',
        checkpoint_path,
        log_dir,
        eval_op=update_op,
        final_op=value_op)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
Project: lsdc    Author: febert
def testUpdateOpsReturnsCurrentValue(self):
    with self.test_session() as sess:
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      mean, update_op = metrics.streaming_mean(values)

      sess.run(tf.local_variables_initializer())

      self.assertAlmostEqual(0.5, sess.run(update_op), 5)
      self.assertAlmostEqual(1.475, sess.run(update_op), 5)
      self.assertAlmostEqual(12.4/6.0, sess.run(update_op), 5)
      self.assertAlmostEqual(1.65, sess.run(update_op), 5)

      self.assertAlmostEqual(1.65, sess.run(mean), 5)
Project: lsdc    Author: febert
def test1dWeightedValues(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      # Create the queue that populates the weighted labels.
      weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [1])
      _enqueue_vector(sess, weights_queue, [0])
      _enqueue_vector(sess, weights_queue, [0])
      _enqueue_vector(sess, weights_queue, [1])
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean(values, weights)

      tf.local_variables_initializer().run()
      for _ in range(4):
        update_op.eval()
      self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
Project: lsdc    Author: febert
def test1dWeightedValues_placeholders(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      feed_values = (
          (0, 1),
          (-4.2, 9.1),
          (6.5, 0),
          (-3.2, 4.0)
      )
      values = tf.placeholder(dtype=tf.float32)

      # Create the queue that populates the weighted labels.
      weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [1])
      _enqueue_vector(sess, weights_queue, [0])
      _enqueue_vector(sess, weights_queue, [0])
      _enqueue_vector(sess, weights_queue, [1])
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean(values, weights)

      tf.local_variables_initializer().run()
      for i in range(4):
        update_op.eval(feed_dict={values: feed_values[i]})
      self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
Project: lsdc    Author: febert
def test2dWeightedValues(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      # Create the queue that populates the weighted labels.
      weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, weights_queue, [1, 1])
      _enqueue_vector(sess, weights_queue, [1, 0])
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean(values, weights)

      tf.local_variables_initializer().run()
      for _ in range(4):
        update_op.eval()
      self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
Project: lsdc    Author: febert
def testMultiDimensional(self):
    with self.test_session() as sess:
      values_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(2, 2, 2))
      _enqueue_vector(sess,
                      values_queue,
                      [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
                      shape=(2, 2, 2))
      _enqueue_vector(sess,
                      values_queue,
                      [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
                      shape=(2, 2, 2))
      values = values_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values)

      sess.run(tf.local_variables_initializer())
      for _ in range(2):
        sess.run(update_op)
      self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]],
                          sess.run(mean))
Project: lsdc    Author: febert
def testUpdateOpsReturnsCurrentValue(self):
    with self.test_session() as sess:
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values)

      sess.run(tf.local_variables_initializer())

      self.assertAllClose([[0, 1]], sess.run(update_op), 5)
      self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
      self.assertAllClose([[2.3/3., 10.1/3.]], sess.run(update_op), 5)
      self.assertAllClose([[-0.9/4., 3.525]], sess.run(update_op), 5)

      self.assertAllClose([[-0.9/4., 3.525]], sess.run(mean), 5)
Project: lsdc    Author: febert
def testWeighted1d(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      # Create the queue that populates the weights.
      weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [[1]])
      _enqueue_vector(sess, weights_queue, [[0]])
      _enqueue_vector(sess, weights_queue, [[1]])
      _enqueue_vector(sess, weights_queue, [[0]])
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values, weights)

      sess.run(tf.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
Project: lsdc    Author: febert
def testWeighted2d_1(self):
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      # Create the queue that populates the weights.
      weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, weights_queue, [1, 1])
      _enqueue_vector(sess, weights_queue, [1, 0])
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values, weights)

      sess.run(tf.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
Project: lsdc    Author: febert
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
    labels = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
    accuracy, update_op = metrics.streaming_accuracy(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_accuracy = accuracy.eval()
      for _ in range(10):
        self.assertEqual(initial_accuracy, accuracy.eval())
Project: lsdc    Author: febert
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
    predictions = tf.convert_to_tensor([1, 1, 1])  # shape 3,
    labels = tf.expand_dims(tf.convert_to_tensor([1, 0, 0]), 1)  # shape 3, 1

    weights = [[100], [1], [1]]  # shape 3, 1
    weights_placeholder = tf.placeholder(dtype=tf.int32, name='weights')
    feed_dict = {weights_placeholder: weights}

    with self.test_session() as sess:
      accuracy, update_op = metrics.streaming_accuracy(
          predictions, labels, weights_placeholder)

      sess.run(tf.local_variables_initializer())
      # if streaming_accuracy does not flatten the weight, accuracy would be
      # 0.33333334 due to an intended broadcast of weight. Due to flattening,
      # it will be higher than .95
      self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
      self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
Project: lsdc    Author: febert
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    precision, update_op = metrics.streaming_precision(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_precision = precision.eval()
      for _ in range(10):
        self.assertEqual(initial_precision, precision.eval())
Project: lsdc    Author: febert
def testWeighted1d_placeholders(self):
    predictions = tf.placeholder(dtype=tf.float32)
    labels = tf.placeholder(dtype=tf.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.streaming_precision(
        predictions, labels, weights=tf.constant([[2], [5]]))

    with self.test_session():
      tf.local_variables_initializer().run()
      weighted_tp = 2.0 + 5.0
      weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(
          expected_precision, update_op.eval(feed_dict=feed_dict))
      self.assertAlmostEqual(
          expected_precision, precision.eval(feed_dict=feed_dict))
Project: lsdc    Author: febert
def testWeighted2d_placeholders(self):
    predictions = tf.placeholder(dtype=tf.float32)
    labels = tf.placeholder(dtype=tf.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.streaming_precision(
        predictions, labels, weights=tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))

    with self.test_session():
      tf.local_variables_initializer().run()
      weighted_tp = 3.0 + 4.0
      weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(
          expected_precision, update_op.eval(feed_dict=feed_dict))
      self.assertAlmostEqual(
          expected_precision, precision.eval(feed_dict=feed_dict))
Project: lsdc    Author: febert
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    recall, update_op = metrics.streaming_recall(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_recall = recall.eval()
      for _ in range(10):
        self.assertEqual(initial_recall, recall.eval())
Project: lsdc    Author: febert
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    auc, update_op = metrics.streaming_auc(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_auc = auc.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_auc, auc.eval(), 5)
Project: lsdc    Author: febert
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, sensitivity=0.7)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_specificity = specificity.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
Project: lsdc    Author: febert
def testWeighted1d(self):
    predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0,
                          0.1, 0.2, 0.2, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weights_values = [3]

    predictions = tf.constant(predictions_values, dtype=tf.float32)
    labels = tf.constant(labels_values)
    weights = tf.constant(weights_values)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, weights=weights, sensitivity=0.4)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      self.assertAlmostEqual(0.6, sess.run(update_op))
      self.assertAlmostEqual(0.6, specificity.eval())
Project: lsdc    Author: febert
def testWeighted2d(self):
    predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0,
                          0.1, 0.2, 0.2, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    predictions = tf.constant(predictions_values, dtype=tf.float32)
    labels = tf.constant(labels_values)
    weights = tf.constant(weights_values)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, weights=weights, sensitivity=0.4)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
      self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
Project: lsdc    Author: febert
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.7)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_sensitivity = sensitivity.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
Project: lsdc    Author: febert
def testWeighted(self):
    predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4,
                          0.01, 0.02, 0.25, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    predictions = tf.constant(predictions_values, dtype=tf.float32)
    labels = tf.constant(labels_values)
    weights = tf.constant(weights_values)
    specificity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, weights=weights, specificity=0.4)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.675, sess.run(update_op))
      self.assertAlmostEqual(0.675, specificity.eval())


# TODO(nsilberman): Break this up into two sets of tests.
Project: lsdc    Author: febert
def testValueTensorIsIdempotent(self):
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    thresholds = [0, 0.5, 1.0]
    prec, prec_op = metrics.streaming_precision_at_thresholds(
        predictions, labels, thresholds)
    rec, rec_op = metrics.streaming_recall_at_thresholds(
        predictions, labels, thresholds)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates, then verify idempotency.
      sess.run([prec_op, rec_op])
      initial_prec = prec.eval()
      initial_rec = rec.eval()
      for _ in range(10):
        sess.run([prec_op, rec_op])
        self.assertAllClose(initial_prec, prec.eval())
        self.assertAllClose(initial_rec, rec.eval())

  # TODO(nsilberman): fix tests (passing but incorrect).
Project: lsdc    Author: febert
def testAllCorrect(self):
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = tf.constant(inputs, dtype=tf.float32)
      labels = tf.constant(inputs)
      thresholds = [0.5]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)

      sess.run(tf.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertEqual(1, prec.eval())
      self.assertEqual(1, rec.eval())
Project: lsdc    Author: febert
def testAllIncorrect(self):
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = tf.constant(inputs, dtype=tf.float32)
      labels = tf.constant(1 - inputs, dtype=tf.float32)
      thresholds = [0.5]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)

      sess.run(tf.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(0, prec.eval())
      self.assertAlmostEqual(0, rec.eval())
Project: lsdc    Author: febert
def testExtremeThresholds(self):
    with self.test_session() as sess:
      predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
      labels = tf.constant([0, 1, 1, 1], shape=(1, 4))
      thresholds = [-1.0, 2.0]  # lower/higher than any values
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)

      [prec_low, prec_high] = tf.split(prec, 2, 0)
      [rec_low, rec_high] = tf.split(rec, 2, 0)

      sess.run(tf.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(0.75, prec_low.eval())
      self.assertAlmostEqual(0.0, prec_high.eval())
      self.assertAlmostEqual(1.0, rec_low.eval())
      self.assertAlmostEqual(0.0, rec_high.eval())
Project: lsdc    Author: febert
def testSingleUpdateKIs2(self):
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(
        self._np_labels, shape=(self._batch_size,), dtype=tf.int64)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=2)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, tf.reshape(labels, (self._batch_size, 1)), k=2)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(0.5, sess.run(update_op))
      self.assertEqual(0.5, recall.eval())
      self.assertEqual(0.5, sess.run(sp_update_op))
      self.assertEqual(0.5, sp_recall.eval())
Project: lsdc    Author: febert
def testSingleUpdateKIs3(self):
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(
        self._np_labels, shape=(self._batch_size,), dtype=tf.int64)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=3)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, tf.reshape(labels, (self._batch_size, 1)), k=3)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(1.0, sess.run(update_op))
      self.assertEqual(1.0, recall.eval())
      self.assertEqual(1.0, sess.run(sp_update_op))
      self.assertEqual(1.0, sp_recall.eval())
Project: lsdc    Author: febert
def testSingleUpdateSomeMissingKIs2(self):
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(
        self._np_labels, shape=(self._batch_size,), dtype=tf.int64)
    weights = tf.constant([0, 1, 0, 1], shape=(self._batch_size,),
                          dtype=tf.float32)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=2, weights=weights)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, tf.reshape(labels, (self._batch_size, 1)), k=2,
        weights=weights)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(1.0, sess.run(update_op))
      self.assertEqual(1.0, recall.eval())
      self.assertEqual(1.0, sess.run(sp_update_op))
      self.assertEqual(1.0, sp_recall.eval())
Project: lsdc    Author: febert
def testValueTensorIsIdempotent(self):
    predictions = tf.random_normal((10, 3), seed=1)
    labels = tf.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_mean_absolute_error(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())
Project: lsdc    Author: febert
def testSingleUpdateNormalizedByLabels(self):
    np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
    np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
    expected_error = np.mean(
        np.divide(np.absolute(np_predictions - np_labels),
                  np_labels))

    predictions = tf.constant(np_predictions, shape=(1, 4), dtype=tf.float32)
    labels = tf.constant(np_labels, shape=(1, 4))

    error, update_op = metrics.streaming_mean_relative_error(
        predictions, labels, normalizer=labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(expected_error, sess.run(update_op))
      self.assertEqual(expected_error, error.eval())