The following 16 code examples, extracted from open-source Python projects, illustrate how to use dataset.DataSet().
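Note that these examples come from different projects, so dataset.DataSet() is not one shared API: some variants take (features, labels) arrays, one takes a config object, others take a file path plus a split flag, and one is a DQN replay buffer. For orientation only, here is a minimal sketch of the in-memory (features, labels) variant that several examples use; the class body and the next_batch method are illustrative assumptions, not the implementation behind any example below.

import numpy as np

class DataSet:
    """Minimal sketch of an in-memory (features, labels) container (illustrative only)."""

    def __init__(self, features, labels):
        assert len(features) == len(labels), "features and labels must align"
        self.features = np.asarray(features)
        self.labels = np.asarray(labels)
        self._cursor = 0

    def next_batch(self, batch_size):
        # Reshuffle and wrap around once an epoch is exhausted.
        if self._cursor + batch_size > len(self.features):
            perm = np.random.permutation(len(self.features))
            self.features, self.labels = self.features[perm], self.labels[perm]
            self._cursor = 0
        start, self._cursor = self._cursor, self._cursor + batch_size
        return self.features[start:self._cursor], self.labels[start:self._cursor]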
def __init__(self, config):
    self.config = config
    self.data = DataSet(self.config)
    self.add_placeholders()
    self.summarizer = tf.summary
    self.net = Network(config)
    self.saver = tf.train.Saver()
    self.epoch_count, self.second_epoch_count = 0, 0
    self.outputs, self.prob = self.net.neural_search()
    self.hyperparams = self.net.gen_hyperparams(self.outputs)
    self.hype_list = [1 for i in range(self.config.hyperparams)]  # [7, 7, 24, 5, 5, 36, 3, 3, 48, 64]
    self.reinforce_loss = self.net.REINFORCE(self.prob)
    self.tr_cont_step = self.net.train_controller(self.reinforce_loss, self.val_accuracy)
    self.cNet, self.y_pred = self.init_child(self.hype_list)
    self.cross_loss, self.accuracy, self.tr_model_step = self.grow_child()
    self.init = tf.global_variables_initializer()
    self.local_init = tf.local_variables_initializer()
def read_ultimate(path, input_shape):
    ultimate_features = numpy.loadtxt(path + "ultimate_feature." + str(input_shape[0]))
    ultimate_features = numpy.reshape(ultimate_features, [-1, input_shape[0], input_shape[1]])
    ultimate_labels = numpy.loadtxt(path + "ultimate_label." + str(input_shape[0]))
    # ultimate_labels = numpy.reshape(ultimate_labels, [-1, 1])
    train_set = DataSet(ultimate_features, ultimate_labels)

    test_features = numpy.loadtxt(path + "ultimate_feature.test." + str(input_shape[0]))
    test_features = numpy.reshape(test_features, [-1, input_shape[0], input_shape[1]])
    test_labels = numpy.loadtxt(path + "ultimate_label.test." + str(input_shape[0]))
    # test_labels = numpy.reshape(test_labels, [-1, 1])
    test_set = DataSet(test_features, test_labels)

    return train_set, test_set
def read_feature(path, input_shape, prefix):
    ultimate_features = numpy.loadtxt("%s/%s_feature.%s" % (path, prefix, str(input_shape[0])))
    ultimate_features = numpy.reshape(ultimate_features, [-1, input_shape[0], input_shape[1]])
    ultimate_labels = numpy.loadtxt("%s/%s_label.%s" % (path, prefix, str(input_shape[0])))
    # ultimate_labels = numpy.reshape(ultimate_labels, [-1, 1])
    train_set = DataSet(ultimate_features, ultimate_labels)

    test_features = numpy.loadtxt("%s/%s_feature.test.%s" % (path, prefix, str(input_shape[0])))
    test_features = numpy.reshape(test_features, [-1, input_shape[0], input_shape[1]])
    test_labels = numpy.loadtxt("%s/%s_label.test.%s" % (path, prefix, str(input_shape[0])))
    # test_labels = numpy.reshape(test_labels, [-1, 1])
    test_set = DataSet(test_features, test_labels)

    return train_set, test_set
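The two loaders above follow the same pattern, with read_feature factoring the filename template out into a prefix argument. A usage sketch, assuming the feature/label text files already exist on disk; the directory name, window length 30, and feature width 61 are placeholder values:

train_set, test_set = read_ultimate("data/", input_shape=[30, 61])  # path needs a trailing slash here
train_set, test_set = read_feature("data", input_shape=[30, 61], prefix="ultimate")  # template inserts the "/"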
def run_train():
    fout = open('inf.txt', 'w+')
    test_config = ModelConfig()
    test_config.keep_prob = 1.0
    test_config.batch_size = 1

    Session_config = tf.ConfigProto(allow_soft_placement=True)
    Session_config.gpu_options.allow_growth = True

    with tf.Graph().as_default(), tf.Session(config=Session_config) as sess:
        with tf.device('/gpu:0'):  # if True:
            initializer = tf.random_uniform_initializer(-test_config.init_scale, test_config.init_scale)

            train_model = vgg16.Vgg16(FLAGS.vgg16_file_path)
            train_model.build(initializer)

            data_test = dataset.DataSet(FLAGS.file_path_test, FLAGS.data_root_dir,
                                        TEST_SIZE, is_train_set=False)

            test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')
            saver = tf.train.Saver(max_to_keep=100)
            last_epoch = load_model(sess, saver, FLAGS.saveModelPath, train_model)
            print('start: ', last_epoch + 1)

            test_accuracy_1, test_accuracy_5, test_loss = run_epoch(
                sess, test_config.keep_prob, fout, test_config.batch_size,
                train_model, data_test, tf.no_op(), 2, test_writer, istraining=False)

            info = "Final: Test accuracy(top 1): %.4f Test accuracy(top 5): %.4f Loss %.4f" % (
                test_accuracy_1, test_accuracy_5, test_loss)
            print(info)
            fout.write(info + '\n')
            fout.flush()

            test_writer.close()
            print("Training step is completed!")
            fout.close()
def run_train():
    fout = open('inf.txt', 'w+')
    test_config = ModelConfig()
    test_config.keep_prob = 1.0
    test_config.batch_size = 1

    Session_config = tf.ConfigProto(allow_soft_placement=True)
    Session_config.gpu_options.allow_growth = True

    with tf.Graph().as_default(), tf.Session(config=Session_config) as sess:
        with tf.device('/gpu:3'):  # if True:
            initializer = tf.random_uniform_initializer(-test_config.init_scale, test_config.init_scale)

            train_model = vgg16.Vgg16(FLAGS.vgg16_file_path)
            train_model.build(initializer)

            data_test = dataset.DataSet(FLAGS.file_path_test, FLAGS.data_root_dir,
                                        TEST_SIZE, is_train_set=False)

            test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')
            saver = tf.train.Saver(max_to_keep=100)
            last_epoch = load_model(sess, saver, FLAGS.saveModelPath, train_model)
            print('start: ', last_epoch + 1)

            test_accuracy_1, test_accuracy_5, test_loss = run_epoch(
                sess, test_config.keep_prob, fout, test_config.batch_size,
                train_model, data_test, tf.no_op(), 2, test_writer, istraining=False)

            info = "Final: Test accuracy(top 1): %.3f Test accuracy(top 5): %.3f Loss %.3f" % (
                test_accuracy_1, test_accuracy_5, test_loss)
            print(info)
            fout.write(info + '\n')
            fout.flush()

            test_writer.close()
            print("Training step is completed!")
            fout.close()
def __init__(self, state_shape, action_num, image_num_per_state, model,
             gamma=0.99,  # discount factor
             replay_batch_size=32,
             replay_memory_size=5*10**4,
             target_model_update_freq=1,
             max_step=50,
             lr=0.00025,
             clipping=False  # if True, ignore reward intensity
             ):
    print("initializing DQN...")
    self.action_num = action_num
    self.image_num_per_state = image_num_per_state
    self.gamma = gamma
    self.replay_batch_size = replay_batch_size
    self.replay_memory_size = replay_memory_size
    self.target_model_update_freq = target_model_update_freq
    self.max_step = max_step
    self.clipping = clipping

    print("Initializing Model...")
    self.model = model
    self.model_target = copy.deepcopy(self.model)

    print("Initializing Optimizer")
    self.optimizer = optimizers.RMSpropGraves(lr=lr, alpha=0.95, momentum=0.0, eps=0.01)
    self.optimizer.setup(self.model)
    self.optimizer.add_hook(chainer.optimizer.GradientClipping(20))

    print("Initializing Replay Buffer...")
    self.dataset = dataset.DataSet(
        max_size=replay_memory_size, max_step=max_step,
        frame_shape=state_shape, frame_dtype=np.uint8)

    self.xp = model.xp
    self.state_shape = state_shape
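For this replay-buffer variant, constructing the buffer directly might look as follows; the 84x84 uint8 frame shape and the sizes are placeholders borrowed from typical Atari DQN setups, not values taken from the project itself:

import numpy as np
import dataset  # project-local module providing this DataSet variant

replay_buffer = dataset.DataSet(
    max_size=5 * 10**4,    # replay memory capacity
    max_step=50,           # maximum episode length stored
    frame_shape=(84, 84),  # placeholder: one grayscale frame
    frame_dtype=np.uint8)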
def load_data(directory, data_processor_module):
    file_list = os.listdir(directory)
    data_set = DataSet(data_processor_module)
    for i, f in enumerate(file_list):
        full_filename = os.path.join(directory, f)
        img = cv2.imread(full_filename)
        if img is None:
            print("WARNING: File: '{}' could not be loaded".format(full_filename))
            continue

        # The files will be of the type:
        # CLASS-source-frame-itemnumber.jpg
        label = f.split("-")[0].lower()
        if (label == "nao_part" or label == "nothing") and random.random() > 0.2:
            # Because we have too many nao_part/nothing images, drop about 80% of them.
            continue
        # if label != "nao_part":
        data_set.add_image(img, label)

        if i % 100 == 0:  # report progress every 100 files
            print("{}/{} - {:5.2f}%".format(i, len(file_list), i * 100. / len(file_list)), end="\r")
    print(" ", end="\r")

    if not data_set.loaded_images:
        raise ValueError("Could not load any images.")
    data_set.process()
    return data_set
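Calling this loader could look like the lines below; the directory name and the processor module are hypothetical stand-ins for whatever the project actually passes in:

import my_processor  # hypothetical module with the hooks DataSet(data_processor_module) expects

data_set = load_data("training_images/", my_processor)
print(len(data_set.loaded_images), "images loaded")  # assumes loaded_images is a list, as the truthiness check suggests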
def run_train():
    fout = open('inf.txt', 'w+')
    test_config = ModelConfig()
    test_config.keep_prob = 1.0
    test_config.batch_size = 1

    Session_config = tf.ConfigProto(allow_soft_placement=True)
    Session_config.gpu_options.allow_growth = True

    with tf.Graph().as_default(), tf.Session(config=Session_config) as sess:
        with tf.device('/gpu:1'):  # if True:
            initializer = tf.random_uniform_initializer(-test_config.init_scale, test_config.init_scale)

            train_model = vgg16.Vgg16(FLAGS.vgg16_file_path)
            train_model.build(initializer)

            data_test = dataset.DataSet(FLAGS.file_path_test, FLAGS.data_root_dir,
                                        TEST_SIZE, is_train_set=False)

            test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')
            saver = tf.train.Saver(max_to_keep=100)
            last_epoch = load_model(sess, saver, FLAGS.saveModelPath, train_model)
            print('start: ', last_epoch + 1)

            test_accuracy_1, test_accuracy_5, test_loss = run_epoch(
                sess, test_config.keep_prob, fout, test_config.batch_size,
                train_model, data_test, tf.no_op(), 2, test_writer, istraining=False)

            info = "Final: Test accuracy(top 1): %.3f Test accuracy(top 5): %.3f Loss %.3f" % (
                test_accuracy_1, test_accuracy_5, test_loss)
            print(info)
            fout.write(info + '\n')
            fout.flush()

            test_writer.close()
            print("Training step is completed!")
            fout.close()
def run_train():
    fout = open('inf.txt', 'w+')
    test_config = ModelConfig()
    test_config.keep_prob = 1.0
    test_config.batch_size = 1

    Session_config = tf.ConfigProto(allow_soft_placement=True)
    Session_config.gpu_options.allow_growth = True

    with tf.Graph().as_default(), tf.Session(config=Session_config) as sess:
        with tf.device('/gpu:0'):  # if True:
            initializer = tf.random_uniform_initializer(-test_config.init_scale, test_config.init_scale)

            train_model = vgg16.Vgg16(FLAGS.vgg16_file_path)
            train_model.build(initializer)

            data_test = dataset.DataSet(FLAGS.file_path_test, FLAGS.data_root_dir,
                                        TEST_SIZE, is_train_set=False)

            test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')
            saver = tf.train.Saver(max_to_keep=100)
            last_epoch = load_model(sess, saver, FLAGS.saveModelPath, train_model)
            print('start: ', last_epoch + 1)

            test_accuracy_1, test_accuracy_5, test_loss = run_epoch(
                sess, test_config.keep_prob, fout, test_config.batch_size,
                train_model, data_test, tf.no_op(), 2, test_writer, istraining=False)

            info = "Final: Test accuracy(top 1): %.3f Test accuracy(top 5): %.3f Loss %.3f" % (
                test_accuracy_1, test_accuracy_5, test_loss)
            print(info)
            fout.write(info + '\n')
            fout.flush()

            test_writer.close()
            print("Training step is completed!")
            fout.close()
def run_train():
    fout = open('inf.txt', 'w+')
    test_config = ModelConfig()
    test_config.keep_prob = 1.0
    test_config.batch_size = 1

    Session_config = tf.ConfigProto(allow_soft_placement=True)
    Session_config.gpu_options.allow_growth = True

    with tf.Graph().as_default(), tf.Session(config=Session_config) as sess:
        with tf.device('/gpu:2'):  # if True:
            initializer = tf.random_uniform_initializer(-test_config.init_scale, test_config.init_scale)

            train_model = vgg16.Vgg16(FLAGS.vgg16_file_path)
            train_model.build(initializer)

            data_test = dataset.DataSet(FLAGS.file_path_test, FLAGS.data_root_dir,
                                        TEST_SIZE, is_train_set=False)

            test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')
            saver = tf.train.Saver(max_to_keep=100)
            last_epoch = load_model(sess, saver, FLAGS.saveModelPath, train_model)
            print('start: ', last_epoch + 1)

            test_accuracy_1, test_accuracy_5, test_loss = run_epoch(
                sess, test_config.keep_prob, fout, test_config.batch_size,
                train_model, data_test, tf.no_op(), 2, test_writer, istraining=False)

            info = "Final: Test accuracy(top 1): %.3f Test accuracy(top 5): %.3f Loss %.3f" % (
                test_accuracy_1, test_accuracy_5, test_loss)
            print(info)
            fout.write(info + '\n')
            fout.flush()

            test_writer.close()
            print("Training step is completed!")
            fout.close()
def import_mnist(validation_size=0):
    """
    Imports MNIST and saves the data as objects of our DataSet class.
    :param validation_size: number of training examples held out for validation
    :return: train, test and validation DataSet objects
    """
    SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
    TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
    TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
    TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
    TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
    ONE_HOT = True
    TRAIN_DIR = 'experiments/data/MNIST_data'

    local_file = base.maybe_download(TRAIN_IMAGES, TRAIN_DIR, SOURCE_URL + TRAIN_IMAGES)
    with open(local_file, 'rb') as f:  # the .gz archives must be opened in binary mode
        train_images = extract_images(f)

    local_file = base.maybe_download(TRAIN_LABELS, TRAIN_DIR, SOURCE_URL + TRAIN_LABELS)
    with open(local_file, 'rb') as f:
        train_labels = extract_labels(f, one_hot=ONE_HOT)

    local_file = base.maybe_download(TEST_IMAGES, TRAIN_DIR, SOURCE_URL + TEST_IMAGES)
    with open(local_file, 'rb') as f:
        test_images = extract_images(f)

    local_file = base.maybe_download(TEST_LABELS, TRAIN_DIR, SOURCE_URL + TEST_LABELS)
    with open(local_file, 'rb') as f:
        test_labels = extract_labels(f, one_hot=ONE_HOT)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    # process images
    train_images = process_mnist(train_images)
    validation_images = process_mnist(validation_images)
    test_images = process_mnist(test_images)

    # standardize data
    train_mean, train_std = get_data_info(train_images)
    train_images = standardize_data(train_images, train_mean, train_std)
    validation_images = standardize_data(validation_images, train_mean, train_std)
    test_images = standardize_data(test_images, train_mean, train_std)

    data = DataSet(train_images, train_labels)
    test = DataSet(test_images, test_labels)
    val = DataSet(validation_images, validation_labels)

    return data, test, val
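Finally, a usage sketch for import_mnist; holding out 5000 validation examples is an arbitrary choice, and next_batch is only an assumption carried over from the sketch after the introduction, not something this project's DataSet is guaranteed to expose:

data, test, val = import_mnist(validation_size=5000)
# xs, ys = data.next_batch(128)  # if the class exposes a next_batch method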