Python model module: inference() example source code

The following 13 code examples, drawn from open-source Python projects, illustrate how to use model.inference().
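The exact signature of inference() varies from project to project, but the convention is the same everywhere: the function builds the forward pass of the model graph and returns unscaled logits, leaving softmax, loss, and evaluation ops to the caller. As a minimal sketch (the layer sizes and names here are illustrative assumptions, not taken from any project below), a TF1-style inference() might look like:

import tensorflow as tf

def inference(images, num_classes):
    """Build the forward pass and return unscaled logits (no softmax)."""
    # Layer sizes are hypothetical; real projects use deeper conv stacks,
    # variable scopes for reuse, weight decay, and summaries.
    flat = tf.layers.flatten(images)
    hidden = tf.layers.dense(flat, 128, activation=tf.nn.relu, name='fc1')
    logits = tf.layers.dense(hidden, num_classes, name='logits')
    return logits

Callers then feed the logits into a loss op for training, or into tf.nn.softmax / tf.nn.in_top_k for evaluation, as the examples below do.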

Project: facial-emotion-detection-dl    Author: dllatas
def test():
    with tf.Graph().as_default():
        image, label = input.get_input(LABEL_PATH, LABEL_FORMAT, IMAGE_PATH, IMAGE_FORMAT)
        logits = model.inference(image)
        top_k_op = tf.nn.in_top_k(logits, label, 1)

        variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Get summaries for TensorBoard
        summary_op = tf.merge_all_summaries()
        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.train.SummaryWriter(input.FLAGS.eval_dir, graph_def=graph_def)

        while True:
            evaluate_model(saver, summary_writer, top_k_op, summary_op)
            if input.FLAGS.run_once:
                break
            time.sleep(input.FLAGS.eval_interval_secs)
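evaluate_model is defined elsewhere in this project. As a hedged sketch of what such a helper conventionally does (the flag name and the precision computation below follow the standard TF1 CIFAR-10 evaluation recipe and are assumptions, not this project's actual code):

def evaluate_model(saver, summary_writer, top_k_op, summary_op):
    with tf.Session() as sess:
        # Restore the newest checkpoint, if any.
        ckpt = tf.train.get_checkpoint_state(input.FLAGS.checkpoint_dir)  # assumed flag
        if not (ckpt and ckpt.model_checkpoint_path):
            return
        saver.restore(sess, ckpt.model_checkpoint_path)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        true_count, total = 0, 0
        for _ in range(num_eval_batches):  # hypothetical batch count
            predictions = sess.run(top_k_op)  # boolean vector from tf.nn.in_top_k
            true_count += np.sum(predictions)
            total += predictions.size
        print('precision @ 1 = %.3f' % (true_count / float(total)))
        coord.request_stop()
        coord.join(threads)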
Project: satellite-image-object-detection    Author: marcbelmont
def test_inference(self):
        with self.test_session() as sess:
            # Create model
            net = create_model(tf.zeros([1, IMG_SIZE, IMG_SIZE, 3]), .1)
            net_ph = tf.placeholder(tf.float32, shape=net.shape)
            infer = inference(net_ph, .1)

            # Test inference results
            output = np.zeros(net.shape).astype(np.float32)
            output[0, 1, 1, :5] = [.84, .4, .68, .346, .346]
            output[0, 1, 1, 10] = .3  # class
            output[0, 2, 2, :5] = [.84, .4, .68, .346, .346]
            output[0, 2, 2, 11] = .03  # class
            result = sess.run([infer], feed_dict={net_ph: output})
            p_box, p_classes, confidence, mask = result[0]

            # Test
            self.assertEqual(mask[0, 1, 1], 1)
            self.assertEqual(p_classes, 5)
            self.assertEqual(confidence, .3 * .84)
            self.assertListEqual(
                [round(x) for x in p_box.tolist()[0]],
                [50, 60, 30, 30],)
Project: age-gender-classification    Author: yunsangq
def evaluate(run_dir):
    with tf.Session() as sess:
        input_file = os.path.join(FLAGS.train_dir, 'md.json')
        print(input_file)
        with open(input_file, 'r') as f:
            md = json.load(f)

        num_eval = md['%s_counts' % FLAGS.eval_data]

        images, labels, _ = inputs(FLAGS.train_dir, FLAGS.batch_size, FLAGS.image_size, mode='test',
                                   num_preprocess_threads=FLAGS.num_preprocess_threads)
        logits = inference(images, md['nlabels'], 1, reuse=False)
        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(run_dir, sess.graph)
        saver = tf.train.Saver()

        eval_once(sess, saver, summary_writer, summary_op, logits, labels, num_eval)
Project: mnist-multi-gpu    Author: normanheckscher
def evaluate():
    """Eval MNIST for a number of steps."""
    with tf.Graph().as_default() as g:
        # Get images and labels for MNIST.
        mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)
        images = mnist.test.images
        labels = mnist.test.labels

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = model.inference(images, keep_prob=1.0)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(predictions=logits, targets=labels, k=1)

        # Create saver to restore the learned variables for eval.
        saver = tf.train.Saver()

        eval_once(saver, top_k_op)
Project: facial-emotion-detection-dl    Author: dllatas
def train():
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)
        image, label = input.get_input(LABEL_PATH, LABEL_FORMAT, IMAGE_PATH, IMAGE_FORMAT)
        logits = model.inference(image)
        loss = model.loss(logits, label)
        train_op = model.train(loss, global_step)
        saver = tf.train.Saver(tf.all_variables())
        summary_op = tf.merge_all_summaries()
        init = tf.initialize_all_variables()
        sess = tf.Session(config=tf.ConfigProto(log_device_placement=input.FLAGS.log_device_placement))
        sess.run(init)
        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)
        summary_writer = tf.train.SummaryWriter(input.FLAGS.train_dir, graph_def=sess.graph_def)
        for step in xrange(input.FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time
            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
            if step % 1 == 0:
                num_examples_per_step = input.FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')
                print (format_str % (datetime.now(), step, loss_value, examples_per_sec, sec_per_batch))
            if step % 10 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
            # Save the model checkpoint periodically.
            if step % 25 == 0:
                checkpoint_path = os.path.join(input.FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Project: satellite-image-object-detection    Author: marcbelmont
def load_inference(sess, ckptdir, threshold):
    images = tf.placeholder(tf.float32, shape=[None, IMG_SIZE, IMG_SIZE, 3])
    net = create_model(images, .1)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep=10)
    if ckptdir and os.path.exists(ckptdir) and not FLAGS.debug:
        checkpoint = tf.train.latest_checkpoint(ckptdir)
        if checkpoint:
            print('Restoring', checkpoint)
            saver.restore(sess, checkpoint)
    return inference(net, threshold), images

###########
# Helpers #
###########
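A caller of load_inference receives the detection ops plus the input placeholder; a minimal usage sketch (the checkpoint path and dummy batch are assumptions, while the four-way unpacking matches the test_inference example above):

sess = tf.Session()
(p_box, p_classes, confidence, mask), images_ph = load_inference(sess, '/path/to/ckpts', threshold=.1)
batch = np.zeros([1, IMG_SIZE, IMG_SIZE, 3], np.float32)  # stand-in for real image data
boxes, classes, conf, m = sess.run([p_box, p_classes, confidence, mask],
                                   feed_dict={images_ph: batch})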
Project: cnn_picture_gazebo    Author: liuyandong1988
def evaluate():
    # Compare predictions with labels to compute accuracy
    encode_to_tfrecords("/home/exbot/ros_kinect_gazebo/src/turtlemove/scripts/one_pictest/test.txt",
                        "/home/exbot/ros_kinect_gazebo/src/turtlemove/scripts/one_pictest", 'test.tfrecords', (37, 37))
    test_image, test_label = decode_from_tfrecords(
        '/home/exbot/ros_kinect_gazebo/src/turtlemove/scripts/one_pictest/test.tfrecords', num_epoch=None)
    test_images, test_labels = get_test_batch(
        test_image, test_label, batch_size=BATCH_SIZE, crop_size=32)
    # [batch, in_height, in_width, in_channels]
    test_images = tf.reshape(test_images, shape=[-1, 32, 32, 3])
    test_images = (tf.cast(test_images, tf.float32) / 255. - 0.5) * 2  # normalize pixel values to [-1, 1]

    logits = model.inference(test_images, BATCH_SIZE, NUM_CLASSES)
    saver = tf.train.Saver(tf.global_variables())

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        rospy.loginfo("Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(train_dir_mydata)

        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            global_step = ckpt.model_checkpoint_path.split(
                '/')[-1].split('-')[-1]
            saver.restore(sess, os.path.join(train_dir_mydata, ckpt_name))
            rospy.loginfo('Loading success, global_step is %s' % global_step)
            test_number = sess.run(tf.argmax(logits, 1))
            rospy.loginfo('*****recognized label:%s' %
                          recognize_label[test_number[0]] + '*****')
            os.remove(
                "/home/exbot/ros_kinect_gazebo/src/turtlemove/scripts/one_pictest/test.png")
            contr_turtle.contr(test_number[0])

        coord.request_stop()  # queue close
        coord.join(threads)
Project: mnist-multi-gpu    Author: normanheckscher
def tower_loss(scope):
    """Calculate the total loss on a single tower running the MNIST model.

    Args:
      scope: unique prefix string identifying the MNIST tower, e.g. 'tower_0'

    Returns:
       Tensor of shape [] containing the total loss for a batch of data
    """
    # Get images and labels for MNIST.
    images, labels = model.inputs(FLAGS.batch_size)

    # Build inference Graph.
    logits = model.inference(images, keep_prob=0.5)

    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = model.loss(logits, labels)

    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    if FLAGS.tb_logging:
        for l in losses + [total_loss]:
            # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
            # training session. This helps the clarity of presentation on
            # tensorboard.
            loss_name = re.sub('%s_[0-9]*/' % model.TOWER_NAME, '', l.op.name)
            tf.summary.scalar(loss_name, l)

    return total_loss
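In the multi-GPU training loop, tower_loss is called once per GPU inside a device and name scope, and the per-tower gradients are averaged before a single apply step. A condensed sketch of that pattern (the optimizer choice and flag names are assumptions, following the standard TF1 multi-tower recipe rather than this project's exact training script):

opt = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)  # assumed optimizer/flag
tower_grads = []
for i in range(FLAGS.num_gpus):  # assumed flag
    with tf.device('/gpu:%d' % i):
        with tf.name_scope('%s_%d' % (model.TOWER_NAME, i)) as scope:
            loss = tower_loss(scope)
            tf.get_variable_scope().reuse_variables()  # share weights across towers
            tower_grads.append(opt.compute_gradients(loss))

# Average each variable's gradient across towers, then apply once.
avg_grads = []
for grads_and_vars in zip(*tower_grads):
    grads = tf.stack([g for g, _ in grads_and_vars])
    avg_grads.append((tf.reduce_mean(grads, 0), grads_and_vars[0][1]))
train_op = opt.apply_gradients(avg_grads)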
Project: DL2W    Author: gauravmm
def generate_predictions(tfrecord_file,
                         train_dir,
                         predictions_file,
                         features_file,
                         batch_size,
                         num_k):
    ids, vectors, _ = data_loader.inputs([tfrecord_file], batch_size=batch_size,
                                         num_threads=16, capacity=batch_size*4,
                                         num_epochs=1, is_training=False)

    predictions = model.inference(vectors)
    features = tf.get_default_graph().get_tensor_by_name('fc1/relu:0')

    init_op = tf.local_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(init_op)
        saver.restore(sess, tf.train.latest_checkpoint(train_dir))

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        with open(predictions_file, 'w') as f1, open(features_file, 'w') as f2:
            f1.write('VideoId,LabelConfidencePairs\n')

            while True:
                try:
                    ids_out, predictions_out = sess.run(
                        [ids, predictions])
                except tf.errors.OutOfRangeError:
                    break

                for i, _ in enumerate(ids_out):
                    f1.write(ids_out[i].decode())
                    f1.write(',')
                    top_k = np.argsort(predictions_out[i])[::-1][:num_k]
                    for j in top_k:
                        f1.write('{} {:5f} '.format(j, predictions_out[i][j]))
                    f1.write('\n')

                    #f2.write(ids_out[i].decode())
                    #f2.write(',')
                    #for j in range(len(features_out[i]) - 1):
                    #    f2.write('{:6e},'.format(features_out[i][j]))
                    #f2.write('{:6e}'.format(features_out[i][-1]))
                    #f2.write('\n')

        coord.request_stop()
        coord.join(threads)
Project: age-gender-classification    Author: yunsangq
def main(argv=None):  # pylint: disable=unused-argument
    files = []

    if FLAGS.face_detection_model:
        print('Using face detector (%s) %s' % (FLAGS.face_detection_type, FLAGS.face_detection_model))
        face_detect = face_detection_model(FLAGS.face_detection_type, FLAGS.face_detection_model)
        face_files, rectangles = face_detect.run(FLAGS.filename)
        print(face_files)
        files += face_files

    with tf.Session() as sess:
        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)

        images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        logits = inference(images, nlabels, 1, reuse=False)

        checkpoint_path = '%s' % (FLAGS.model_dir)
        model_checkpoint_path, global_step = get_checkpoint(checkpoint_path)

        saver = tf.train.Saver()
        saver.restore(sess, model_checkpoint_path)

        softmax_output = tf.nn.softmax(logits)

        coder = ImageCoder()

        # Support a batch mode if no face detection model
        if len(files) == 0:
            files.append(FLAGS.filename)
            # If it happens to be a list file, read the list and clobber the files
            if one_of(FLAGS.filename, ('csv', 'tsv', 'txt')):
                files = batchlist(FLAGS.filename)

        writer = None
        output = None
        if FLAGS.target:
            print('Creating output file %s' % FLAGS.target)
            output = open(FLAGS.target, 'w')
            writer = csv.writer(output)
            writer.writerow(('file', 'label', 'score'))

        for f in files:
            image_file = resolve_file(f)

            if image_file is None: continue

            try:
                best_choice = classify(sess, label_list, softmax_output, coder, images, image_file)
                if writer is not None:
                    writer.writerow((f, best_choice[0], '%.2f' % best_choice[1]))
            except Exception as e:
                print(e)
                print('Failed to run image %s ' % image_file)

        if output is not None:
            output.close()
Project: InceptionV3_TensorFlow    Author: MasazI
def train():
    with tf.Graph().as_default():
        # global step number
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
        dataset = DataSet()

        # get prediction set
        print("The number of images to predict is: %d" % (dataset.cnt_samples(FLAGS.predictcsv)))
        csv_predict = FLAGS.predictcsv
        lines = dataset.load_csv(csv_predict)
        lines.sort()

        images_ph = tf.placeholder(tf.float32, [1, 229, 229, 3])

        num_classes = FLAGS.num_classes
        restore_logits = not FLAGS.fine_tune

        # inference
        logits = model.inference(images_ph, num_classes, for_training=False, restore_logits=restore_logits)


        # Retain the summaries from the final tower.
        batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)

        # saver
        saver = tf.train.Saver(tf.all_variables())

        # Build the summary operation from the last tower summaries.
        summary_op = tf.merge_all_summaries()

        # initialization
        init = tf.initialize_all_variables()

        # session
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("load: checkpoint %s" % (ckpt.model_checkpoint_path))
            saver.restore(sess, ckpt.model_checkpoint_path)

        print("start to predict.")
        for step, line in enumerate(lines):
            pil_img = Image.open(line[0])
            pil_img = pil_img.resize((250, 250))
            img_array_r = np.asarray(pil_img)
            img_array_r = img_array_r[15:244,15:244,:]
            img_array = img_array_r[None, ...]
            softmax_eval = sess.run([logits[2]], feed_dict={images_ph: img_array})
            print("%s,%s,%s" % (line[0], line[1], np.argmax(softmax_eval)))
        print("finish to predict.")
        coord.request_stop()
        coord.join(threads)
        sess.close()
Project: pythonLean    Author: 527515025
def run_training():  

    # Training data directory
    train_dir = '/Users/yangyibo/GitWork/pythonLean/AI/????/img/'   # My dir--20170727-csq
    # logs_train_dir stores the training logs and checkpoints for TensorBoard
    logs_train_dir = '/Users/yangyibo/GitWork/pythonLean/AI/????/saveNet/'

    # Get the lists of training files and labels
    train, train_label = input_data.get_files(train_dir)
    # Build the input batches
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    # Build the inference graph
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # Compute the loss
    train_loss = model.losses(train_logits, train_label_batch)
    # Training op
    train_op = model.trainning(train_loss, learning_rate)
    # Evaluation accuracy
    train__acc = model.evaluation(train_logits, train_label_batch)
    # Merge all summaries
    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    # Write summaries for TensorBoard
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())  
    coord = tf.train.Coordinator()  
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  

    try:  
        for step in np.arange(MAX_STEP):  
            if coord.should_stop():  
                    break  
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])  

            if step % 50 == 0:  
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %(step, tra_loss, tra_acc*100.0))  
                summary_str = sess.run(summary_op)  
                train_writer.add_summary(summary_str, step)  

            if step % 2000 == 0 or (step + 1) == MAX_STEP:  
                # Save a checkpoint to checkpoint_path every 2000 steps and at the final step
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')  
                saver.save(sess, checkpoint_path, global_step=step)  

    except tf.errors.OutOfRangeError:  
        print('Done training -- epoch limit reached')  
    finally:  
        coord.request_stop()
    coord.join(threads)  
    sess.close()  

# train
Project: pythonLean    Author: 527515025
def evaluate_one_image():  
    train_dir = '/Users/yangyibo/GitWork/pythonLean/AI/????/testImg/'
    # Get the lists of test files and labels, then pick one image
    train, train_label = input_data.get_files(train_dir)
    image_array = get_one_image(train)  

    with tf.Graph().as_default():  
        BATCH_SIZE = 1  # Evaluating a single image, so the batch size is 1
        N_CLASSES = 2  # Two output classes, encoded as [1, 0] or [0, 1]
        # Convert the image data to float32
        image = tf.cast(image_array, tf.float32)
        # Standardize the image
        image = tf.image.per_image_standardization(image)
        # Reshape the [208, 208, 3] image into the 4D tensor inference expects
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        # inference returns raw logits, so apply softmax to get probabilities
        logit = tf.nn.softmax(logit)

        # Input placeholder (note: the graph above is built directly from
        # image_array, so feeding this placeholder below has no effect)
        x = tf.placeholder(tf.float32, shape=[208, 208, 3])

        # Directory holding the trained checkpoints
        logs_train_dir = '/Users/yangyibo/GitWork/pythonLean/AI/????/saveNet/'
        # Create the saver to restore variables
        saver = tf.train.Saver()

        with tf.Session() as sess:  

            print("???????????????")
            # ??????sess ? 
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)  
            if ckpt and ckpt.model_checkpoint_path:  
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]  
                saver.restore(sess, ckpt.model_checkpoint_path)  
                print('Loading success, global_step is %s' % global_step)
            else:  
                print('No checkpoint file found')
            # Run the prediction
            prediction = sess.run(logit, feed_dict={x: image_array})
            # Take the class with the highest probability
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('Class 0 probability: %.6f' % prediction[:, 0])
            else:
                print('Class 1 probability: %.6f' % prediction[:, 1])
# evaluate