Python model module: load_model() example source code

We extracted the following 6 code examples from open source Python projects to illustrate how to use model.load_model().

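As a quick orientation, here is a minimal sketch of the pattern used in the FaceRecoginition examples below, assuming Keras's load_model() and a model saved as an HDF5 file; the file path and input shape are placeholders, not taken from any of the projects.

import numpy as np
from keras.models import load_model  # assumption: the Keras implementation of load_model

model = load_model('path/to/model.h5')            # placeholder path; restores architecture + weights
x = np.zeros((1, 128, 128, 1), dtype=np.float32)  # placeholder input: one 128x128 grayscale image
probs = model.predict(x)                          # forward pass, shape (1, num_classes)
print(probs.argmax(axis=-1))                      # index of the most likely class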

Project: FaceRecoginition    Author: ProHiryu
def test_file():
    count = 1
    face_cascade = cv2.CascadeClassifier(
        '/usr/local/opt/opencv3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')

    argvs = sys.argv
    for argv in argvs[1:]:
        img = cv2.imread(argv)

        if img is not None:  # cv2.imread() returns None when the file cannot be read
            try:
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                print('convert succeed')
            except cv2.error:
                print('can not convert to gray image')
                continue
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                f = cv2.resize(gray[y:(y + h), x:(x + w)], (128, 128))
                model = load_model('/Users/songheqi/model/model.h5')
                num, acc = predict(model, f, 128)
                name_list = read_name_list('/Users/songheqi/train_set/')
                print('The {} picture is '.format(count) +
                      name_list[num] + ' acc : ', acc)
                count += 1

Project: chainer-qrnn    Author: musyoku
def main():
    model = load_model(args.model_dir)
    assert model is not None

    vocab, vocab_inv = load_vocab(args.model_dir)
    assert vocab is not None
    assert vocab_inv is not None

    vocab_size = model.vocab_size

    with chainer.using_config("train", False):
        for n in range(args.num_generate):
            word_ids = np.arange(0, vocab_size, dtype=np.int32)
            token = ID_BOS
            x = np.asarray([[token]]).astype(np.int32)
            model.reset_state()
            while token != ID_EOS and x.shape[1] < args.max_sentence_length:
                u = model.forward_one_step(x)
                p = F.softmax(u).data[-1]
                token = np.random.choice(word_ids, size=1, p=p)
                x = np.append(x, np.asarray([token]).astype(np.int32), axis=1)

            sentence = []
            for token in x[0]:
                word = vocab_inv[token]
                sentence.append(word)
            print(" ".join(sentence))

Project: chainer-glu    Author: musyoku
def main(args):
    model = load_model(args.model_dir)
    assert model is not None

    vocab, vocab_inv = load_vocab(args.model_dir)
    assert vocab is not None
    assert vocab_inv is not None

    vocab_size = model.vocab_size

    with chainer.using_config("train", False):
        for n in range(args.num_generate):
            word_ids = np.arange(0, vocab_size, dtype=np.int32)
            token = ID_BOS
            x = np.asarray([[token]]).astype(np.int32)
            model.reset_state()
            while token != ID_EOS and x.shape[1] < args.max_sentence_length:
                u = model.forward_one_step(x)
                p = F.softmax(u).data[-1]
                token = np.random.choice(word_ids, size=1, p=p)
                x = np.append(x, np.asarray([token]).astype(np.int32), axis=1)

            sentence = []
            for token in x[0]:
                word = vocab_inv[token]
                sentence.append(word)
            print(" ".join(sentence))

Project: FaceRecoginition    Author: ProHiryu
def test_camera():
    face_patterns = cv2.CascadeClassifier(
        '/usr/local/opt/opencv3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')

    cameraCapture = cv2.VideoCapture(0)
    success, frame = cameraCapture.read()

    while True:
        success, frame = cameraCapture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert the frame to grayscale
        faces = face_patterns.detectMultiScale(gray, 1.3, 5)  # detect faces in the frame
        for (x, y, w, h) in faces:
            frame = cv2.rectangle(
                frame, (x, y), (x + w, y + h), (255, 0, 0), 2)  # draw a blue rectangle around the detected face
            f = cv2.resize(gray[y:(y + h), x:(x + w)], (128, 128))
            model = load_model('/Users/songheqi/model/model.h5')
            num, acc = predict(model, f, 128)
            name_list = read_name_list('/Users/songheqi/train_set/')
            print('You are ' + name_list[num] + ' acc : ', acc)
        cv2.imshow("Camera", frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit
            break

    cameraCapture.release()
    cv2.destroyAllWindows()

Project: chainer-speech-recognition    Author: musyoku
def main():
    # load the vocabulary
    vocab, vocab_inv, BLANK = get_vocab()
    vocab_size = len(vocab)

    # minibatch size for each bucket (tuned for a single GTX 1080)
    batchsizes = [96, 64, 64, 64, 64, 64, 64, 64, 48, 48, 48, 32, 32, 24, 24, 24, 24, 24, 24, 24, 24, 24]

    augmentation = AugmentationOption()
    if args.augmentation:
        augmentation.change_vocal_tract = True
        augmentation.change_speech_rate = True
        augmentation.add_noise = True

    model = load_model(args.model_dir)
    assert model is not None


    if args.gpu_device >= 0:
        chainer.cuda.get_device(args.gpu_device).use()
        model.to_gpu(args.gpu_device)
    xp = model.xp

    # evaluation
    with chainer.using_config("train", False):
        iterator = TestMinibatchIterator(wav_path_test, trn_path_test, cache_path, batchsizes, BLANK, buckets_limit=args.buckets_limit, option=augmentation, gpu=args.gpu_device >= 0)
        buckets_errors = []
        for batch in iterator:
            x_batch, x_length_batch, t_batch, t_length_batch, bucket_idx, progress = batch

            if args.filter_bucket_id and bucket_idx != args.filter_bucket_id:
                continue

            sys.stdout.write("\r" + stdout.CLEAR)
            sys.stdout.write("computing CER of bucket {} ({} %)".format(bucket_idx + 1, int(progress * 100)))
            sys.stdout.flush()

            y_batch = model(x_batch, split_into_variables=False)
            y_batch = xp.argmax(y_batch.data, axis=2)
            error = compute_minibatch_error(y_batch, t_batch, BLANK, print_sequences=True, vocab=vocab_inv)

            while bucket_idx >= len(buckets_errors):
                buckets_errors.append([])

            buckets_errors[bucket_idx].append(error)

        avg_errors = []
        for errors in buckets_errors:
            avg_errors.append(sum(errors) / len(errors))

        sys.stdout.write("\r" + stdout.CLEAR)
        sys.stdout.flush()

        print_bold("bucket  CER")
        for bucket_idx, error in enumerate(avg_errors):
            print("{}   {}".format(bucket_idx + 1, error * 100))

Project: chainer-qrnn    Author: musyoku
def main(args):
    vocab, vocab_inv = load_vocab(args.model_dir)
    vocab_source, vocab_target = vocab
    vocab_inv_source, vocab_inv_target = vocab_inv

    source_dataset, target_dataset = read_data(vocab_source, vocab_target, args.source_train, None, args.source_dev, None, args.source_test, None, reverse_source=True)

    source_dataset_train, source_dataset_dev, source_dataset_test = source_dataset
    target_dataset_train, target_dataset_dev, target_dataset_test = target_dataset
    printb("data    #")
    if len(source_dataset_train) > 0:
        print("train    {}".format(len(source_dataset_train)))
    if len(source_dataset_dev) > 0:
        print("dev  {}".format(len(source_dataset_dev)))
    if len(source_dataset_test) > 0:
        print("test {}".format(len(source_dataset_test)))


    # split into buckets
    source_buckets_train = None
    if len(source_dataset_train) > 0:
        printb("buckets     #data   (train)")
        source_buckets_train = make_buckets(source_dataset_train)
        if args.buckets_slice is not None:
            source_buckets_train = source_buckets_train[:args.buckets_slice + 1]
        for size, data in zip(bucket_sizes, source_buckets_train):
            print("{}   {}".format(size, len(data)))

    source_buckets_dev = None
    if len(source_dataset_dev) > 0:
        printb("buckets     #data   (dev)")
        source_buckets_dev = make_buckets(source_dataset_dev)
        if args.buckets_slice is not None:
            source_buckets_dev = source_buckets_dev[:args.buckets_slice + 1]
        for size, data in zip(bucket_sizes, source_buckets_dev):
            print("{}   {}".format(size, len(data)))

    source_buckets_test = None
    if len(source_dataset_test) > 0:
        printb("buckets     #data   (test)")
        source_buckets_test = make_buckets(source_dataset_test)
        if args.buckets_slice is not None:
            source_buckets_test = source_buckets_test[:args.buckets_slice + 1]
        for size, data in zip(bucket_sizes, source_buckets_test):
            print("{}   {}".format(size, len(data)))

    # init
    model = load_model(args.model_dir)
    assert model is not None
    if args.gpu_device >= 0:
        cuda.get_device(args.gpu_device).use()
        model.to_gpu()

    if source_buckets_train is not None:
        dump_source_translation(model, source_buckets_train, vocab_inv_source, vocab_inv_target, beam_width=args.beam_width, normalization_alpha=args.alpha)

    if source_buckets_dev is not None:
        dump_source_translation(model, source_buckets_dev, vocab_inv_source, vocab_inv_target, beam_width=args.beam_width, normalization_alpha=args.alpha)

    if source_buckets_test is not None:
        dump_source_translation(model, source_buckets_test, vocab_inv_source, vocab_inv_target, beam_width=args.beam_width, normalization_alpha=args.alpha)