Python utils module: load_dataset() example source code

The following 7 code examples, extracted from open-source Python projects, illustrate how to use utils.load_dataset().
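
Note that each project defines load_dataset() in its own utils module, so the signature varies from example to example: some versions take no arguments, others take a data directory, a file path, or a filename prefix. As a minimal sketch of what such a helper typically looks like (the archive path and key names below are illustrative assumptions, not any one project's actual implementation):

import numpy as np

def load_dataset(path='data/dataset.npz'):
    # Illustrative sketch only: the real load_dataset() in each project
    # below differs in its arguments and return values.
    with np.load(path) as archive:
        X = archive['X']  # feature array
        y = archive['y']  # label array
    return X, y

X, y = load_dataset()  # e.g. X.shape == (n_samples, n_features)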

Project: real_time_face_detection    Author: Snowapril    | Project source | File source
def main(FLAG):
    Model = SimpleModel(FLAG.input_dim, FLAG.hidden_dim, FLAG.output_dim, optimizer=tf.train.RMSPropOptimizer(FLAG.learning_rate))

    # load_dataset() is this repo's utils helper; it returns training images and coordinate labels
    image, label = load_dataset()
    image, label = image_augmentation(image, label, horizon_flip=True, control_brightness=True)
    label = label / 96.  # scale labels into [0, 1] (coordinates on 96-pixel images)
    (train_X, train_y), (valid_X, valid_y), (test_X, test_y) = split_data(image, label)

    if FLAG.Mode == "validation":
        lr_list = 10 ** np.random.uniform(-6, -2, 20)  # 20 learning rates sampled log-uniformly from 1e-6 to 1e-2
        Model.validation(train_X, train_y, valid_X, valid_y, lr_list)
    elif FLAG.Mode == "train":
        Model.train(train_X, train_y, valid_X, valid_y, FLAG.batch_size, FLAG.Epoch, FLAG.save_graph, FLAG.save_model)

        pred_Y = Model.predict(test_X[123])
        print(pred_Y)
        print(test_y[123])
        print(np.mean(np.square( pred_Y - test_y[123] )))
Project: cs234_final_project    Author: nipunagarwala    | Project source | File source
def setup_actor_critic_model(args):
    dataset_dir = utils.choose_data(args)
    dataset = utils.load_dataset(dataset_dir)
    print "Using checkpoint directory: {0}".format(args.ckpt_dir)

    model = utils.choose_model(args) # pass in necessary model parameters
    print "Running {0} model for {1} epochs.".format(args.model, args.num_epochs)

    global_step = tf.Variable(0, trainable=False, name='global_step')
    saver = tf.train.Saver(max_to_keep=args.num_epochs)

    with tf.Session(config=GPU_CONFIG) as session:
        print "Inititialized TF Session!"

        # Checkpoint
        i_stopped, found_ckpt = utils.get_checkpoint(args, session, saver)
        # Summary Writer
        file_writer = tf.summary.FileWriter(args.ckpt_dir, graph=session.graph, max_queue=10, flush_secs=30)
        # Val or test set accuracy

        # Make computational graph
        if args.train == "train" and not found_ckpt:
            init_op = tf.global_variables_initializer()
            init_op.run()
        else:
            if not found_ckpt:
                print "No checkpoint found for test or validation!"
                return

        model.load_yolo(session)
        # init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
        #                     model_path='/data/yolo/YOLO_small.ckpt',
        #                     var_list=model.variables_to_restore)
        # init_fn(session)

        if args.train == 'train':
            for i in range(i_stopped, args.num_epochs):
                run_actor_critic_model(args, model, session, dataset, file_writer, saver, i)
Project: stock_tournament    Author: howland    | Project source | File source
def train_model(parameters):
    X, Y, meta = utils.load_dataset(parameters['prefix'])  # features, labels, and dataset metadata for this prefix
    print(X.shape)
    print(Y.shape)
    model_test = model.TournamentNn(input_dim=X.shape[1], labels_dim=Y.shape[1], prefix=parameters['prefix'])
    model_test.train(X, Y, prefix=parameters['prefix'], batch=True)
    # test_query = model_test.predict(query_input)
Project: StockPredictor    Author: wallsbreaker    | Project source | File source
def predict_classify(target_var, target_labels, model_path):
    #redefine model
    target_var = T.imatrix('y')
    target_labels = target_var
    dnn_strategy = model_path.split('/')[-1].split('_')[0]
    network = get_model_by_strategy(dnn_strategy)

    #load params
    params = []
    with open(model_path, 'r') as f:
        lines = f.readlines()
        for line in lines:
            params.append(np.array(json.loads(line)))
    set_all_param_values(network, params)

    # deterministic=True disables stochastic layers such as dropout at prediction time
    predict_prediction = get_output(network, deterministic=True)
    predict_acc = binary_accuracy(predict_prediction, target_labels).mean()

    input_layer = get_all_layers(network)[0]
    predict = theano.function([input_layer.input_var, target_var],[predict_prediction, predict_acc])

    X, labels, values, _ = load_dataset('../../data/test')
    predict_prediction, predict_acc = predict(X, labels)

    sys.stdout.write("  predict accuracy:\t\t\t{} %\n".format(predict_acc * 100))

    #output predict result
    with open('../../data/prediction', 'w') as f:
        for ix in range(len(labels)):
            line = str(labels[ix]) + '\t' + str(values[ix]) + '\t' + str(predict_prediction[ix][0]) + '\n'
            f.write(line)
    sys.stdout.flush()
Project: StockPredictor    Author: wallsbreaker    | Project source | File source
def predict_regress(model_path):
    #redefine model
    target_var = T.fmatrix('y')
    target_labels = T.switch(T.gt(target_var, 0), 1, 0)
    dnn_strategy = model_path.split('/')[-1].split('_')[0]
    network = get_model_by_strategy(dnn_strategy)

    #load params
    params = []
    with open(model_path, 'r') as f:
        lines = f.readlines()
        for line in lines:
            params.append(np.array(json.loads(line)))
    set_all_param_values(network, params)

    predict_prediction = get_output(network, deterministic=True)
    predict_labels = T.switch(T.gt(predict_prediction, 0), 1, 0)
    predict_acc = binary_accuracy(predict_labels, target_labels, threshold=0).mean()

    input_layer = get_all_layers(network)[0]
    predict = theano.function([input_layer.input_var, target_var],[predict_prediction, predict_acc])

    X, y, labels, values, _, _, _, _, _, _ = load_dataset('../../data/test')
    predict_prediction, predict_acc = predict(X, y)

    sys.stdout.write("  predict accuracy:\t\t\t{} %\n".format(predict_acc * 100))

    #output predict result
    with open('../../data/prediction', 'w') as f:
        for ix in range(len(labels)):
            line = str(labels[ix]) + '\t' + str(values[ix]) + '\t' + str(predict_prediction[ix][0]) + '\n'
            f.write(line)
    sys.stdout.flush()
Project: lstm_gan    Author: vangaa    | Project source | File source
def main():
    args = utils.get_args()
    dataset = utils.load_dataset(os.path.join(args.data_path, DATASET_FILE))
    index2word, word2index = utils.load_dicts(os.path.join(args.data_path, VOCABULARY_FILE))

    print("Use dataset with {} sentences".format(dataset.shape[0]))

    batch_size = args.batch_size
    noise_size = args.noise_size
    with tf.Graph().as_default(), tf.Session() as session:   
        lstm_gan = LSTMGAN(
            SENTENCE_SIZE,
            VOCABULARY_SIZE,
            word2index[SENTENCE_START_TOKEN],
            hidden_size_gen = args.hid_gen,
            hidden_size_disc = args.hid_disc,
            input_noise_size = noise_size,
            batch_size = batch_size,
            dropout = args.dropout,
            lr = args.lr,
            grad_cap = args.grad_clip
        )

        session.run(tf.initialize_all_variables())

        if args.save_model or args.load_model:
            saver = tf.train.Saver()

        if args.load_model:
            try:
                saver.restore(session, utils.SAVER_FILE)
            except ValueError:
                print("Cant find model file")
                sys.exit(1)
        while True:
            offset = 0.
            for dataset_part in utils.iterate_over_dataset(dataset, batch_size*args.disc_count):
                print("Start train discriminator wih offset {}...".format(offset))
                for ind, batch in enumerate(utils.iterate_over_dataset(dataset_part, batch_size)):
                    noise = np.random.random(size=(batch_size, noise_size))
                    cost = lstm_gan.train_disc_on_batch(session, noise, batch)
                    print("Processed {} sentences with train cost = {}".format((ind+1)*batch_size, cost))

                print("Start train generator...")
                for ind in range(args.gen_count):
                    noise = np.random.random(size=(batch_size, noise_size))
                    cost = lstm_gan.train_gen_on_batch(session, noise)
                    if args.gen_sent:
                        sent = lstm_gan.generate_sent(session, np.random.random(size=(noise_size, )))
                        print(' '.join(index2word[i] for i in sent))
                    print("Processed {} noise inputs with train cost {}".format((ind+1)*batch_size, cost))

                offset += batch_size*args.disc_count
                if args.save_model:
                    saver.save(session, utils.SAVER_FILE)
                    print("Model saved")
Project: stock_tournament    Author: howland    | Project source | File source
def tournament(parameters):
    X, Y, meta = utils.load_dataset(parameters['prefix'])
    print("finished load")
    # model_test = model.TournamentNn(input_dim=X.shape[1], labels_dim=Y.shape[1], prefix=parameters['prefix'], restore=True)
    model_test = model.TournamentNn(input_dim=X.shape[1], labels_dim=Y.shape[1], prefix=parameters['prefix'])
    model_test.train(X, Y, prefix=parameters['prefix'], batch=True)
    print("loaded tf model")
    # test_query = model_test.predict(np.arange(0,40))

    print("todo")
    # for date in parameters['tournament_dates']:
    t_X = []
    t_returns = {}
    for company in parameters['company_list']:
        temp_returns = []
        # temp_dates = []
        for val in parameters['data'][company]:
            # temp_dates.append(val['date'])
            temp_returns.append(val['return'])
        # Add date and check it here!!
        t_returns[company] = temp_returns[-parameters['compare_length']:]

    # Perform multiple rounds of the tournament
    win_count = {}
    for i in range(2000):
        currently_standing = list(parameters['company_list'])  # copy so shuffling doesn't mutate the input list
        while len(currently_standing) > 3:  # knockout rounds until 3 or fewer companies remain
            random.shuffle(currently_standing)
            pairs = []
            for j in range(math.floor(len(currently_standing)/2)):
                pairs.append([currently_standing[2*j], currently_standing[(2*j)+1]])

            pairs = np.array(pairs)
            pair_examples = []
            for p in pairs:
                ex_input = tournament_pipeline.generate_x(t_returns[p[0]][-20:],t_returns[p[1]][-20:])
                pair_examples.append(ex_input)
            pair_examples = np.array(pair_examples)
            round_results = np.argmax(model_test.predict(pair_examples), axis=1)

            remaining = []
            for k in range(round_results.shape[0]):  # 'k' avoids shadowing the outer round counter 'i'
                if round_results[k] != 2: # In case model picked neither
                    remaining.append(pairs[k][round_results[k]])

            # if round_results.shape[0] < len(currently_standing):
                # remaining.append(currently_standing[-1])
            currently_standing = remaining
        for c in currently_standing:
            if c in win_count:
                win_count[c] = win_count[c] + 1
            else:
                win_count[c] = 1
    sorted_x = sorted(win_count.items(), key=operator.itemgetter(1))
    print(sorted_x)