Python input_data module: read_data_sets() example source code

We extracted the following 12 code examples from open-source Python projects to illustrate how to use input_data.read_data_sets().

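In most of these projects, input_data is the MNIST helper bundled with TensorFlow (tensorflow.examples.tutorials.mnist.input_data); a few, such as tf-midi-id, define their own input_data module with the same interface. As a minimal orientation sketch (the MNIST_data/ directory and the one_hot flag mirror the examples below):

from tensorflow.examples.tutorials.mnist import input_data

# Downloads MNIST on first use, then returns a Datasets object with
# train, validation, and test splits.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
images, labels = mnist.train.next_batch(100)  # one mini-batch of 100 examples

Each split exposes images, labels, num_examples, and next_batch(), which is the interface the snippets below rely on.
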
Project: tf-midi-id    Author: cghawthorne    | Project source | File source
def fill_feed_dict(data_set, images_pl, labels_pl):
  """Fills the feed_dict for training the given step.

  A feed_dict takes the form of:
  feed_dict = {
      <placeholder>: <tensor of values to be passed for placeholder>,
      ....
  }

  Args:
    data_set: The set of images and labels, from input_data.read_data_sets()
    images_pl: The images placeholder, from placeholder_inputs().
    labels_pl: The labels placeholder, from placeholder_inputs().

  Returns:
    feed_dict: The feed dictionary mapping from placeholders to values.
  """
  # Create the feed_dict for the placeholders filled with the next
  # `batch_size` examples.
  images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size)
  feed_dict = {
      images_pl: images_feed,
      labels_pl: labels_feed,
  }
  return feed_dict
Project: tf-midi-id    Author: cghawthorne    | Project source | File source
def do_eval(sess,
            eval_correct,
            midi_data_placeholder,
            labels_placeholder,
            data_set):
  """Runs one evaluation against the full epoch of data.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    midi_data_placeholder: The MIDI data placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of midi data and labels to evaluate, from
      input_data.read_data_sets().
  """
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in xrange(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               midi_data_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
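  # NOTE: this relies on true division (the full file presumably imports
  # division from __future__); plain Python 2 division would truncate it to 0.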
  precision = true_count / num_examples
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))
Project: Face_Point    Author: EllenSimith    | Project source | File source
def do_eval(sess,
            eval_correct,
            images_placeholder,
            points_placeholder,
            factors_placeholder, 
            crds_placeholder,
            width_placeholder,
            data_set):
  """Runs one evaluation against the full epoch of data.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    images_placeholder: The images placeholder.
    points_placeholder: The points placeholder.
    factors_placeholder: The factors placeholder.
    crds_placeholder: The coordinates placeholder.
    width_placeholder: The width placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
  """
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in xrange(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               points_placeholder,
                               factors_placeholder, 
                               crds_placeholder,
                               width_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  precision = true_count / num_examples
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))
  return precision
Project: TensorNet-TF    Author: timgaripov    | Project source | File source
def do_eval(sess,
            eval_correct,
            loss,
            data_set):
    """Runs one evaluation against the full epoch of data.
    Args:
        sess: The session in which the model has been trained.
        eval_correct: The Tensor that returns the number of correct predictions.
        loss: The Tensor that returns the loss value.
        data_set: The set of images and labels to evaluate, from
            input_data.read_data_sets().
    """
    # And run one epoch of eval.
    true_count = 0  # Counts the number of correct predictions.
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size
    sum_loss = 0.0
    for step in xrange(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set.next_batch(FLAGS.batch_size),
                                   train_phase=False)
        res = sess.run([loss, eval_correct], feed_dict=feed_dict)
        sum_loss += res[0]
        true_count += res[1]
    precision = true_count / num_examples
    avg_loss = sum_loss / steps_per_epoch
    print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f  Loss: %.2f' %
          (num_examples, true_count, precision, avg_loss))
    return precision, avg_loss
Project: the-neural-perspective    Author: GokuMohandas    | Project source | File source
def load_data():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    trainX, trainY = mnist.train.images, mnist.train.labels
    testX, testY = mnist.test.images, mnist.test.labels
    return trainX, trainY, testX, testY
Project: the-neural-perspective    Author: GokuMohandas    | Project source | File source
def load_data():
    # Load the data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    trainX, trainY, testX, testY = mnist.train.images, mnist.train.labels, \
                                   mnist.test.images, mnist.test.labels
    trainX = trainX.reshape(-1, 28, 28, 1)
    testX = testX.reshape(-1, 28, 28, 1)
    return trainX, trainY, testX, testY
Project: GSN    Author: peteykun    | Project source | File source
def main(sigma, sample_path='samples.npy'):

    # provide a .npy file where 10k generated samples are saved. 
    filename = sample_path

    print 'loading samples from %s' % filename

    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    test_X, test_Y = mnist.test.next_batch(mnist.test.num_examples)
    samples = numpy.load(filename)

    test_ll = numpy_parzen(test_X, samples, sigma)

    print "Mean Log-Likelihood of test set = %.5f" % numpy.mean(test_ll)
    print "Std of Mean Log-Likelihood of test set = %.5f" % (numpy.std(test_ll) / 100)
Project: interprettensor    Author: VigneshSrinivasan10    | Project source | File source
def test():

    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    with tf.Session() as sess:
        x = tf.placeholder(tf.float32, [FLAGS.batch_size, 784], name='input')
        with tf.variable_scope('model'):
            my_network = layers()
            output = my_network.forward(x)
            if FLAGS.relevance:
                RELEVANCE = my_network.lrp(output, 'simple', 1.0)

        # Merge all the summaries and write them out 
        merged = tf.summary.merge_all()
        test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/my_model')

        # Initialize variables and reload your model
        saver = init_vars(sess)

        # Extract testing data 
        xs, ys = mnist.test.next_batch(FLAGS.batch_size)
        # Pass the test data to the restored model
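        # NOTE: RELEVANCE is only defined when FLAGS.relevance is True.
        # (2*xs)-1 rescales the inputs from [0, 1] to [-1, 1].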
        summary, relevance_test = sess.run([merged, RELEVANCE], feed_dict={x: (2 * xs) - 1})
        test_writer.add_summary(summary, 0)

        # Save the images as heatmaps to visualize on tensorboard
        images = xs.reshape([FLAGS.batch_size, 28, 28, 1])
        images = (images + 1) / 2.0
        relevances = relevance_test.reshape([FLAGS.batch_size, 28, 28, 1])
        plot_relevances(relevances, images, test_writer)

        test_writer.close()
Project: the-neural-perspective    Author: johnsonc    | Project source | File source
def load_data():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    trainX, trainY = mnist.train.images, mnist.train.labels
    testX, testY = mnist.test.images, mnist.test.labels
    return trainX, trainY, testX, testY
Project: the-neural-perspective    Author: johnsonc    | Project source | File source
def load_data():
    # Load the data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    trainX, trainY, testX, testY = mnist.train.images, mnist.train.labels, \
                                   mnist.test.images, mnist.test.labels
    trainX = trainX.reshape(-1, 28, 28, 1)
    testX = testX.reshape(-1, 28, 28, 1)
    return trainX, trainY, testX, testY
Project: tree-structured-group-lasso    Author: jaesik817    | Project source | File source
def train(group):
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir,
                                    one_hot=True,
                                    fake_data=FLAGS.fake_data)
  tr_data, tr_label = mnist.train.next_batch(mnist.train.num_examples)
  # Dictionary Initialization
  M = len(tr_data[0])
  D = tsgl.dict_initializer(M, FLAGS.P)
  # Learning
  lr = FLAGS.learning_rate
  pre_mse = 10
  for i in range(1, FLAGS.max_steps + 1):
    # Data Shuffle
    idx = list(range(len(tr_data)))
    np.random.shuffle(idx)
    batch = tr_data[idx[:FLAGS.batch_num]].transpose()
    # Learning Rate Decay
    if i % FLAGS.decay_num == 0:
      lr = lr / float(FLAGS.decay_rate)
    # Sparse Coding
    A = tsgl.sparse_coding(D, batch, FLAGS, group)
    print(A[:, 0])
    print(A[:, 1])
    # Dictionary Learning
    D = tsgl.dictionary_learning(D, batch, A, lr, FLAGS)
    loss = np.linalg.norm(np.matmul(D, A) - batch, axis=0)
    mse = np.mean(loss)
    print(str(i) + "th MSE: " + str(mse))
    mse_diff = abs(mse - pre_mse)
    if mse_diff < FLAGS.mse_diff_threshold:
      print("Learning Done")
      exit(1)
    pre_mse = mse
  print("Max Iterations Done")
Project: codingame    Author: cpj1352    | Project source | File source
def main(argv):

    ################################
    # Enter your code between here #
    ################################

    mnist = input_data.read_data_sets(raw_input(), raw_input(), raw_input())

    # Start TF InteractiveSession
    sess = tf.InteractiveSession()

    # Build a Softmax Regression model
    ## Placeholders
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    W1 = weight_variable([784, 100])
    b1 = bias_variable([100])
    W2 = weight_variable([100, 300])
    b2 = bias_variable([300])
    W3 = weight_variable([300, 10])
    b3 = bias_variable([10])

    ## Initializing Variables
    sess.run(tf.initialize_all_variables())

    # Prediction class and loss function
    keep_prob = tf.placeholder(tf.float32)
    h1 = tf.nn.relu(tf.matmul(x, W1) + b1)
    h2 = tf.nn.relu(tf.matmul(h1, W2) + b2)
    y = tf.nn.softmax(tf.matmul(h2, W3) + b3)

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

    # Train model
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    for i in range(1000):
        batch = mnist.train.next_batch(100)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})


    # print ' '.join(map(str, [random.randint(0,9) for _ in range(len(mnist.validation.images))]))


    ########################
    #        And here      #
    ########################


    # Uncomment to get a prediction number for each image

    result = sess.run(tf.argmax(y, 1), feed_dict={x: mnist.validation.images})
    print ' '.join(map(str, result))