Python reader module: ptb_raw_data() example source code

We extracted the following 15 code examples from open-source Python projects to illustrate how to use reader.ptb_raw_data().
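All 15 examples follow the same calling pattern, sketched below. This is a minimal sketch, assuming reader is the reader.py that ships with TensorFlow's PTB tutorial and that the data directory (the path shown is hypothetical) contains ptb.train.txt, ptb.valid.txt, and ptb.test.txt. Note that the official tutorial reader returns the vocabulary size as the fourth value, while some of the projects below use a fork that returns the word-to-id dict instead.

import reader

# Hypothetical location of the unpacked PTB data set.
data_path = '/tmp/simple-examples/data'

# The first three values are lists of word ids; the fourth is the
# vocabulary (size or word-to-id mapping, depending on the reader fork).
train_data, valid_data, test_data, vocab = reader.ptb_raw_data(data_path)
print(len(train_data), len(valid_data), len(test_data))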

Project: Language-Model-2016    Author: feizhihui
def main():
    # Load the training, validation, and test sets
    raw_data = reader.ptb_raw_data('/home/feizhihui/MyData/dataset/PTB')
    train_data, valid_data, test_data, vocab = raw_data
    # Choose one of the configs: small, medium, large, or test
    # The config must match the one used to train the restored checkpoint
    config = TestConfig()
    with tf.Graph().as_default(), tf.Session() as session:
        with tf.variable_scope("model"):
            m = PTBModel(is_training=True, config=config)

        saver = tf.train.Saver()
        saver.restore(session, "PTB_Model/PTB_Variables.ckpt")

        print(vocab)
        rvocab = dict(zip(vocab.values(), vocab.keys()))
        lstm_state_value = session.run(m.initial_state)
        while True:
            lstm_state_value = predict(session, m, vocab, rvocab, lstm_state_value)
Project: tensorflow_with_latest_papers    Author: NickShahML
def main(_):
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
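  # Evaluation runs one word at a time: batch_size = num_steps = 1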
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
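    # reuse=True shares the training model's variables with the eval models below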
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, config=config)
      mtest = PTBModel(is_training=False, config=eval_config)

    tf.initialize_all_variables().run()

    for i in range(config.max_max_epoch):
      lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      m.assign_lr(session, config.learning_rate * lr_decay)

      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity = run_epoch(session, m, train_data, m.train_op,
                                   verbose=True)
      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
      valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
      print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

    test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
    print("Test Perplexity: %.3f" % test_perplexity)
Project: taas-examples    Author: caicloud
def testPtbRawData(self):
    tmpdir = tf.test.get_temp_dir()
    for suffix in "train", "valid", "test":
      filename = os.path.join(tmpdir, "ptb.%s.txt" % suffix)
      with tf.gfile.GFile(filename, "w") as fh:
        fh.write(self._string_data)
    # Smoke test
    output = reader.ptb_raw_data(tmpdir)
    self.assertEqual(len(output), 4)
Project: taas-examples    Author: caicloud
def run():
    client = serving_grpc_client.GRPCClient('localhost:50051')

    # Load the PTB data
    print("Loading ptb data...")
    train_data, valid_data, test_data, _ = reader.ptb_raw_data(FLAGS.data_path)

    # Feed the first 10 words of the PTB test set one at a time; the logits from the last step predict the 11th word
    state = {}
    logits = None
    for i in range(10):
        inputs = {
            'input': tf.contrib.util.make_tensor_proto(test_data[i], shape=[1,1])
        }

        # From the second step on, also feed back the RNN state returned by
        # the previous call, so the prediction is conditioned on all earlier words
        if i > 0:
            for key in state:
                inputs[key] = tf.contrib.util.make_tensor_proto(state[key])

        outputs = client.call_predict(inputs)

        # Split the outputs: 'logits' is the prediction; every other tensor is RNN state to carry into the next step
        for key in outputs:
            if key == "logits":
                logits = tf.contrib.util.make_ndarray(outputs[key])
            else:
                state[key] = tf.contrib.util.make_ndarray(outputs[key])
    print('logits: {0}'.format(logits))
Project: rnn_benchmarks    Author: caglar
def main(_):
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data
  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, config=config)
      mtest = PTBModel(is_training=False, config=eval_config)

    tf.initialize_all_variables().run()
    wpss = []
    for i in range(config.max_max_epoch):
      lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      m.assign_lr(session, config.learning_rate * lr_decay)

      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity, wps = run_epoch(session, m, train_data, m.train_op,
                                   verbose=True)
      wpss.append(wps)
      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
      valid_perplexity, _ = run_epoch(session, mvalid, valid_data, tf.no_op())
      print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

    test_perplexity, _ = run_epoch(session, mtest, test_data, tf.no_op())
    print("Test Perplexity: %.3f" % test_perplexity)
    print("Mean wps: ", np.mean(wpss))
    print("Std wps:", np.std(wpss))
Project: tf-sparql    Author: derdav3
def main(_):
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, config=config)
      mtest = PTBModel(is_training=False, config=eval_config)

    tf.initialize_all_variables().run()

    for i in range(config.max_max_epoch):
      lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      m.assign_lr(session, config.learning_rate * lr_decay)

      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity = run_epoch(session, m, train_data, m.train_op,
                                   verbose=True)
      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
      valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
      print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

    test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
    print("Test Perplexity: %.3f" % test_perplexity)
Project: ActionRecognition    Author: woodfrog
def testPtbRawData(self):
        tmpdir = tf.test.get_temp_dir()
        for suffix in "train", "valid", "test":
            filename = os.path.join(tmpdir, "ptb.%s.txt" % suffix)
            with tf.gfile.GFile(filename, "w") as fh:
                fh.write(self._string_data)
        # Smoke test
        output = reader.ptb_raw_data(tmpdir)
        self.assertEqual(len(output), 4)
Project: dlbench    Author: hclhkbu
def main(_):
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  if config.device == '-1':
    tf_dev = '/cpu:0'
  else:
    tf_dev = '/gpu:' + config.device

  print(tf_dev)
  tconfig = tf.ConfigProto(allow_soft_placement=True)
  if tf_dev.find('cpu') >= 0: # cpu version
    num_threads = os.getenv('OMP_NUM_THREADS', 1)
    tconfig = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=int(num_threads))
  with tf.Graph().as_default(), tf.device(tf_dev), tf.Session(config=tconfig) as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    # with tf.variable_scope("model", reuse=True, initializer=initializer):
    #   mvalid = PTBModel(is_training=False, config=config)
    #   mtest = PTBModel(is_training=False, config=eval_config)

    tf.initialize_all_variables().run()

    for i in range(config.max_max_epoch):
      lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      m.assign_lr(session, config.learning_rate * lr_decay)

      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity = run_epoch(session, m, train_data, m.train_op, config.iters, 
                                   verbose=True)
#      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
#      valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
#      print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

#    test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
#    print("Test Perplexity: %.3f" % test_perplexity)
Project: dlbench    Author: hclhkbu
def main(_):
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  if config.device == '-1':
    tf_dev = '/cpu:0'
  else:
    tf_dev = '/gpu:' + config.device

  print(tf_dev)
  tconfig = tf.ConfigProto(allow_soft_placement=True)
  if tf_dev.find('cpu') >= 0: # cpu version
    num_threads = os.getenv('OMP_NUM_THREADS', 1)
    tconfig = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=int(num_threads))
  with tf.Graph().as_default(), tf.device(tf_dev), tf.Session(config=tconfig) as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
       #mvalid = PTBModel(is_training=False, config=config)
       mtest = PTBModel(is_training=False, config=eval_config)

    tf.global_variables_initializer().run()

    total_average_batch_time = 0.0

    epochs_info = []
    for i in range(config.max_max_epoch):
      #lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      #m.assign_lr(session, config.learning_rate * lr_decay)
      m.assign_lr(session, config.learning_rate)

      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity, average_batch_time = run_epoch(session, m, train_data, m.train_op, verbose=True)
      total_average_batch_time += average_batch_time
      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
      if i % 2 == 0:
         epochs_info.append('%d:_:%.3f'%(i, train_perplexity)) 
#      valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
#      print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

    print("average_batch_time: %.6f" % (total_average_batch_time/int(config.max_max_epoch)))
    print('epoch_info:'+','.join(epochs_info))

    test_perplexity, test_average_batch_time = run_epoch(session, mtest, test_data, tf.no_op())
    print("Test Perplexity: %.3f" % test_perplexity)
Project: YellowFin    Author: JianGoForIt
def main(_):
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    with tf.name_scope("Train"):
      train_input = PTBInput(config=config, data=train_data, name="TrainInput")
      with tf.variable_scope("Model", reuse=None, initializer=initializer):
        m = PTBModel(is_training=True, config=config, input_=train_input)
      tf.scalar_summary("Training Loss", m.cost)
      tf.scalar_summary("Learning Rate", m.lr)

    with tf.name_scope("Valid"):
      valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
      tf.scalar_summary("Validation Loss", mvalid.cost)

    with tf.name_scope("Test"):
      test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mtest = PTBModel(is_training=False, config=eval_config,
                         input_=test_input)

    sv = tf.train.Supervisor(logdir=FLAGS.save_path)
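    # The Supervisor handles checkpoint saving and restoring for the managed session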
    with sv.managed_session() as session:
    # session = sv.managed_session()
    # with tf.Session() as session:
      for i in range(config.max_max_epoch):
        lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)

        print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
        train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                     verbose=True)
        print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
        valid_perplexity = run_epoch(session, mvalid)
        print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

      test_perplexity = run_epoch(session, mtest)
      print("Test Perplexity: %.3f" % test_perplexity)

      if FLAGS.save_path:
        print("Saving model to %s." % FLAGS.save_path)
        sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
Project: DeepLearningAndTensorflow    Author: azheng333
def main(_):
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")

    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = get_config()
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)

        with tf.name_scope("Train"):
            train_input = PTBInput(config=config, data=train_data, name="TrainInput")
            with tf.variable_scope("Model", reuse=None, initializer=initializer):
                m = PTBModel(is_training=True, config=config, input_=train_input)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)

        with tf.name_scope("Valid"):
            valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)

        with tf.name_scope("Test"):
            test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mtest = PTBModel(is_training=False, config=eval_config,
                                 input_=test_input)

        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        with sv.managed_session() as session:
            for i in range(config.max_max_epoch):
                lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)

                print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                             verbose=True)
                print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
                valid_perplexity = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

            test_perplexity = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % test_perplexity)

            if FLAGS.save_path:
                print("Saving model to %s." % FLAGS.save_path)
                sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
Project: taas-examples    Author: caicloud
def main(_):
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    with tf.name_scope("Train"):
      train_input = PTBInput(config=config, data=train_data, name="TrainInput")
      with tf.variable_scope("Model", reuse=None, initializer=initializer):
        m = PTBModel(is_training=True, config=config, input_=train_input)
      tf.summary.scalar("Training Loss", m.cost)
      tf.summary.scalar("Learning Rate", m.lr)

    with tf.name_scope("Valid"):
      valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
      tf.summary.scalar("Validation Loss", mvalid.cost)

    with tf.name_scope("Test"):
      test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mtest = PTBModel(is_training=False, config=eval_config,
                         input_=test_input)

    sv = tf.train.Supervisor(logdir=FLAGS.save_path)
    with sv.managed_session() as session:
      for i in range(config.max_max_epoch):
        lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)

        print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
        train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                     verbose=True)
        print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
        valid_perplexity = run_epoch(session, mvalid)
        print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

      test_perplexity = run_epoch(session, mtest)
      print("Test Perplexity: %.3f" % test_perplexity)

      if FLAGS.save_path:
        print("Saving model to %s." % FLAGS.save_path)
        sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
Project: tensorflow-statereader    Author: sebastianGehrmann
def main(_):
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")

    raw_data = reader.ptb_raw_data(FLAGS.data_path, True)
    train_data, valid_data, _ = raw_data

    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-FLAGS.init_scale,
                                                    FLAGS.init_scale)

        with tf.name_scope("Train"):
            train_input = PTBInput(data=train_data, name="TrainInput")
            with tf.variable_scope("Model", reuse=None, initializer=initializer):
                m = PTBModel(is_training=True, input_=train_input)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)

        with tf.name_scope("Train_states"):
            train_input = PTBInput(data=train_data, name="TrainInput")
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mstates = PTBModel(is_training=False, input_=train_input)
            tf.summary.scalar("Training Loss", mstates.cost)

        with tf.name_scope("Valid"):
            valid_input = PTBInput(data=valid_data, name="ValidInput")
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mvalid = PTBModel(is_training=False, input_=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)


        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        with sv.managed_session() as session:
            if FLAGS.load_path:
                sv.saver.restore(session, tf.train.latest_checkpoint(FLAGS.load_path))
            else:
                for i in range(FLAGS.max_max_epoch):
                    lr_decay = FLAGS.lr_decay ** max(i + 1 - FLAGS.max_epoch, 0.0)
                    m.assign_lr(session, FLAGS.learning_rate * lr_decay)

                    print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
                    train_perplexity, stat = run_epoch(session, m, eval_op=m.train_op,
                                                       verbose=True)
                    print(stat.shape)
                    print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
                    valid_perplexity, stat = run_epoch(session, mvalid)
                    print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
            # run and store the states on training set
            train_perplexity, stat = run_epoch(session, mstates, eval_op=m.train_op,
                                               verbose=True)
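            # Dump the recorded LSTM states to an HDF5 file for offline analysis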
            f = h5py.File("states.h5", "w")
            stat = np.reshape(stat, (-1, mstates.size))
            f["states1"] = stat
            f.close()

            if FLAGS.save_path:
                print("Saving model to %s." % FLAGS.save_path)
                sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
Project: Language-Model-2016    Author: feizhihui
def main():
    # Load the training, validation, and test sets
    raw_data = reader.ptb_raw_data('/home/feizhihui/MyData/dataset/PTB/')
    train_data, valid_data, test_data, _ = raw_data
    # Choose one of the configs: small, medium, large, or test
    # The eval config below evaluates one word at a time (batch_size = num_steps = 1)
    config = Config()
    eval_config = Config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default(), tf.Session() as session:
        # Random uniform initializer for the model variables
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        # Build the training model
        # reuse=None means the variables in this scope are freshly created
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = PTBModel(is_training=True, config=config)
        # Reuse the same variables for the validation and test models
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config)
            mtest = PTBModel(is_training=False, config=eval_config)

        tf.global_variables_initializer().run()

        for i in range(config.max_max_epoch):
            # 0.5 ** (0, ..., 0, 1, 2, ...)
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            # Keep the learning rate fixed for the first max_epoch epochs, then decay it
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity, train_accuracy = run_epoch(session, m, train_data, m.train_op,
                                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f, Train Accuracy: %.3f"
                  % (i + 1, train_perplexity, train_accuracy))
            valid_perplexity, valid_accuracy = run_epoch(session, mvalid, valid_data, tf.no_op(), verbose=True)
            print("Epoch: %d Valid Perplexity: %.3f, Valid Accuracy: %.3f"
                  % (i + 1, valid_perplexity, valid_accuracy))

        test_perplexity, test_accuracy = run_epoch(session, mtest, test_data, tf.no_op(), verbose=True)
        print("Test Perplexity: %.3f, Test Accuracy: %.3f" % (test_perplexity, test_accuracy))
        saver = tf.train.Saver()
        save_path = saver.save(session, "./PTB_Model/PTB_Variables.ckpt")
        print("Save to path: ", save_path)
Project: bit-rnn    Author: hqythu
def main(_):
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")

    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = get_config()
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.uniform_unit_scaling_initializer()
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = PTBModel(is_training=True, config=config)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config)
            mtest = PTBModel(is_training=False, config=eval_config)

        tf.global_variables_initializer().run()

        def get_learning_rate(epoch, config):
            base_lr = config.learning_rate
            if epoch <= config.nr_epoch_first_stage:
                return base_lr
            elif epoch <= config.nr_epoch_second_stage:
                return base_lr * 0.1
            else:
                return base_lr * 0.01

        for i in range(config.max_epoch):
            m.assign_lr(session, get_learning_rate(i, config))

            print("Epoch: %d Learning rate: %f"
                  % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(
                session, m, train_data, m.train_op, verbose=True)
            print("Epoch: %d Train Perplexity: %.3f"
                  % (i + 1, train_perplexity))
            valid_perplexity = run_epoch(
                session, mvalid, valid_data, tf.no_op())
            print("Epoch: %d Valid Perplexity: %.3f"
                  % (i + 1, valid_perplexity))

        test_perplexity = run_epoch(
            session, mtest, test_data, tf.no_op())
        print("Test Perplexity: %.3f" % test_perplexity)