Python chainer.serializers module: load_npz() example source code

The following code examples, drawn from open-source Python projects, illustrate how to use chainer.serializers.load_npz().
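
As a quick orientation before the project examples, here is a minimal sketch of the API (the model and file names are hypothetical): serializers.save_npz() writes a Link's parameters to NumPy's .npz format, and serializers.load_npz() restores them in place into an object with a matching structure.

import chainer.links as L
from chainer import serializers

# Persist the parameters of any chainer.Link / Chain to an .npz file.
model = L.Linear(3, 2)
serializers.save_npz('model.npz', model)

# Restore in place; the target must mirror the saved structure.
restored = L.Linear(3, 2)
serializers.load_npz('model.npz', restored)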

Project: chainer_pong | Author: icoxfog417
def __init__(self, actions, epsilon=1, n_history=4, on_gpu=False, model_path="", load_if_exist=True):
        self.actions = actions
        self.epsilon = epsilon
        self.q = Q(n_history, len(actions), on_gpu)
        self._state = []
        self._observations = [
            np.zeros((self.q.SIZE, self.q.SIZE), np.float32), 
            np.zeros((self.q.SIZE, self.q.SIZE), np.float32)
        ]  # now & pre
        self.last_action = 0
        self.model_path = model_path if model_path else os.path.join(os.path.dirname(__file__), "./store")
        if not os.path.exists(self.model_path):
            print("make directory to store model at {0}".format(self.model_path))
            os.mkdir(self.model_path)
        else:
            models = self.get_model_files()
            if load_if_exist and len(models) > 0:
                print("load model file {0}.".format(models[-1]))
                serializers.load_npz(os.path.join(self.model_path, models[-1]), self.q)  # use latest model
Project: chainer-object-detection | Author: dsanno
def __init__(self, model_path, config):
        # hyper parameters
        self.n_boxes = 5
        self.config = config
        self.labels = config['categories']
        self.n_classes = len(self.labels)
        self.detection_thresh = config['confidence']
        self.iou_thresh = config['iou']
        anchors = config['anchors']
        # load model
        print('loading model...')
        yolov2 = YOLOv2(n_classes=self.n_classes, n_boxes=self.n_boxes)
        serializers.load_npz(model_path, yolov2)
        model = YOLOv2Predictor(yolov2)
        model.init_anchor(anchors)
        model.predictor.finetune = False
        self.model = model
Project: LSTMVAE | Author: ashwatthaman
def test(args, encdec, model_name, categ_arr=[], predictFlag=False):
    serializers.load_npz(model_name, encdec)
    if args.gpu >= 0:
        import cupy as cp
        global xp
        xp = cp
        encdec.to_gpu()
    encdec.setBatchSize(args.batchsize)

    if "cvae" in model_name:
        for categ in categ_arr:
            print("categ:{}".format(encdec.categ_vocab.itos(categ)))
            if predictFlag:
                encdec.predict(args.batchsize, tag=categ, randFlag=False)
    elif predictFlag:
        encdec.predict(args.batchsize, randFlag=False)
    return encdec
Project: LSTMVAE | Author: ashwatthaman
def loadModel(self,model_name_base,args):
        first_e = 0
        model_name = ""
        for e in range(args.epoch):
            model_name_tmp = model_name_base.format(args.dataname, args.dataname, e,args.n_latent)
            if os.path.exists(model_name_tmp):
                model_name = model_name_tmp
                self.setEpochNow(e + 1)

        if os.path.exists(model_name):
            print(model_name)
            # serializers.load_npz(model_name, encdec)
            serializers.load_npz(model_name, self)
            print("loaded_{}".format(model_name))
            first_e = self.epoch_now
        else:
            print("loadW2V")
            if os.path.exists(args.premodel):
                self.loadW(args.premodel)
            else:
                print("wordvec model doesnt exists.")
        return first_e
Project: chainer-deconv | Author: germanRos
def loadInfo(self, folder, model, state, smanager):
        if(not os.path.exists(folder)):
            return (model, state, 1)
        list_files = []
        model_name = model.getName()
        for file in os.listdir(folder):
            if(file.startswith(model_name) and file.endswith(".state")):
                list_files.append(file)
        if(len(list_files) > 0):
            sorted_list = self.natural_sort(list_files)
            fname_state = sorted_list[-1]

            bname = re.split(r'\.', fname_state)[0]
            fname_model = bname + '.model'
            fname_stats = bname + '.stats'
            epoch = int(re.split(r'_|\.', bname)[-1]) + 1
            serializers.load_npz(folder + '/' + fname_state, state)
            serializers.load_npz(folder + '/' + fname_model, model)
            smanager.load(folder + '/' + fname_stats)

        else:
            epoch = 1
            # no prev. models...
        return (model, state, epoch)
Project: deel | Author: uei
def __init__(self,modelpath='misc/VGG16_faster_rcnn_final.model',
                    mean=[102.9801, 115.9465, 122.7717],
                    in_size=224):
        super(FasterRCNN,self).__init__('FasterRCNN',in_size)
        self.func = FRCNN(Deel.gpu)
        self.func.train=False
        serializers.load_npz(modelpath, self.func)

        ImageNet.mean_image = np.ndarray((3, 256, 256), dtype=np.float32)
        ImageNet.mean_image[0] = mean[0]
        ImageNet.mean_image[1] = mean[1]
        ImageNet.mean_image[2] = mean[2]
        ImageNet.in_size = in_size

        self.labels = CLASSES

        self.batchsize = 1
        xp = Deel.xp
        self.x_batch = xp.ndarray((self.batchsize, 3, self.in_size, self.in_size), dtype=np.float32)

        if Deel.gpu >=0:
            self.func = self.func.to_gpu(Deel.gpu)
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.func)
Project: mlimages | Author: icoxfog417
def predict(limit):
    _limit = limit if limit > 0 else 5

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, mean_image_file=MEAN_IMAGE_FILE, image_property=IMAGE_PROP)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    serializers.load_npz(MODEL_FILE, model)

    i = 0
    for arr, im in td.generate():
        x = np.ndarray((1,) + arr.shape, arr.dtype)
        x[0] = arr
        x = chainer.Variable(np.asarray(x), volatile="on")
        y = model.predict(x)
        p = np.argmax(y.data)
        print("predict {0}, actual {1}".format(label_def[p], label_def[im.label]))
        im.image.show()
        i += 1
        if i >= _limit:
            break
Project: cgp-cnn | Author: sg-nm
def test(self, cgp, model_file, comp_graph='comp_graph.dot', batchsize=256):
        chainer.cuda.get_device(0).use()  # Make a specified GPU current
        model = CGP2CNN(cgp, self.n_class)
        print('\tLoad model from', model_file)
        serializers.load_npz(model_file, model)
        model.to_gpu(0)
        test_accuracy, test_loss = self.__test(model, batchsize)
        print('\tparamNum={}'.format(model.param_num))
        print('\ttest mean loss={}, test accuracy={}'.format(test_loss / self.test_data_num, test_accuracy / self.test_data_num))

        if comp_graph is not None:
            with open(comp_graph, 'w') as o:
                g = computational_graph.build_computational_graph((model.loss,))
                o.write(g.dump())
                del g
                print('\tCNN graph generated ({}).'.format(comp_graph))

        return test_accuracy, test_loss
Project: voxcelchain | Author: hiroaki-kaneda
def main():
    model = voxelchain.VoxelChain()
    serializers.load_npz('result/VoxelChain.model',model)
    use_model(model)
Project: voxcelchain | Author: hiroaki-kaneda
def main():
    model = voxelchain.VoxelChain()
    serializers.load_npz('result/VoxelChain.model',model)
    conv1(model)
    conv2(model)
    create_graph()
Project: chainer-object-detection | Author: dsanno
def main():
    args = parse_args()

    print("loading classifier model...")
    input_model = YOLOv2Classifier(args.input_class)
    serializers.load_npz(args.input_path, input_model)

    model = YOLOv2(args.output_class, args.box)
    copy_conv_layer(input_model, model, partial_layer)
    copy_bias_layer(input_model, model, partial_layer)
    copy_bn_layer(input_model, model, partial_layer)

    print("saving model to %s" % (args.output_path))
    serializers.save_npz(args.output_path, model)
Project: chainerrl | Author: chainer
def load_npz_no_strict(filename, obj):
    try:
        serializers.load_npz(filename, obj)
    except KeyError as e:
        warnings.warn(repr(e))
        with numpy.load(filename) as f:
            d = serializers.NpzDeserializer(f, strict=False)
            d.load(obj)
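
For reference, a minimal usage sketch of the helper above (the model and file name are hypothetical): because the NpzDeserializer is created with strict=False, parameters missing from the snapshot are skipped instead of raising KeyError, so a partially matching architecture still loads.

import chainer.links as L

# Hypothetical model: only the parameters it shares with the snapshot
# are restored; entries absent from the file are skipped.
model = L.Classifier(L.Linear(784, 10))
load_npz_no_strict('agent_snapshot.npz', model)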
Project: chainer-faster-rcnn | Author: mitmul
def get_model(gpu):
    model = FasterRCNN(gpu)
    model.train = False
    serializers.load_npz('data/VGG16_faster_rcnn_final.model', model)

    return model
Project: trainer | Author: nutszebra
def model_init(self):
        load_model = self.load_model
        model = self.model
        gpu = self.gpu
        if load_model is None:
            print('ReLU weight initialization')
            model.weight_initialization()
        else:
            print('loading ' + self.load_model)
            serializers.load_npz(load_model, model)
        model.check_gpu(gpu)
Project: trainer | Author: nutszebra
def model_init(model, load_model):
        if load_model is None:
            print('Weight initialization')
            model.weight_initialization()
        else:
            print('loading {}'.format(load_model))
            serializers.load_npz(load_model, model)
Project: trainer | Author: nutszebra
def load_model(self, path=''):
        serializers.load_npz(path, self)
Project: context2vec | Author: orenmel
def read_lstm_model(self, params, train):

        assert not train  # reading a model to continue training is currently not supported

        words_file = params['config_path'] + params['words_file']
        model_file = params['config_path'] + params['model_file']
        unit = int(params['unit'])
        deep = (params['deep'] == 'yes')
        drop_ratio = float(params['drop_ratio'])

        #read and normalize target word embeddings
        w, word2index, index2word = self.read_words(words_file) 
        s = numpy.sqrt((w * w).sum(1))
        s[s==0.] = 1.
        w /= s.reshape((s.shape[0], 1))  # normalize

        context_word_units = unit
        lstm_hidden_units = IN_TO_OUT_UNITS_RATIO*unit
        target_word_units = IN_TO_OUT_UNITS_RATIO*unit

        cs = [1 for _ in range(len(word2index))] # dummy word counts - not used for eval
        loss_func = L.NegativeSampling(target_word_units, cs, NEGATIVE_SAMPLING_NUM) # dummy loss func - not used for eval

        model = BiLstmContext(deep, self.gpu, word2index, context_word_units, lstm_hidden_units, target_word_units, loss_func, train, drop_ratio)
        S.load_npz(model_file, model)

        return w, word2index, index2word, model
Project: DeepPoseComparison | Author: ynaka81
def __init__(self, Nj, gpu, model_file, filename):
        # initialize model to estimate.
        self.model = AlexNet(Nj)
        self.gpu = gpu
        serializers.load_npz(model_file, self.model)
        # prepare gpu.
        if self.gpu >= 0:
            chainer.cuda.get_device(gpu).use()
            self.model.to_gpu()
        # load dataset to estimate.
        self.dataset = PoseDataset(filename)
Project: DeepPoseComparison | Author: ynaka81
def __init__(self, Nj, gpu, model_file, filename):
        # initialize model to estimate.
        self.model = AlexNet(Nj, use_visibility=True)
        self.gpu = gpu
        serializers.load_npz(model_file, self.model)
        # prepare gpu.
        if self.gpu >= 0:
            chainer.cuda.get_device(gpu).use()
            self.model.to_gpu()
        # load dataset to estimate.
        self.dataset = PoseDataset(filename)
Project: rnn-morpheme-analyzer | Author: mitaki28
def load(load_dir, epoch):
    with (load_dir/meta_name).open('rb') as f:
        storage = Storage(*np.load(f)[0])
    serializers.load_npz(
        str(load_dir/model_name(epoch)),
        storage.model
    )
    serializers.load_npz(
        str(load_dir/optimizer_name(epoch)),
        storage.optimizer
    )
    return storage
Project: ddnn | Author: kunglab
def load_chain_model(self, **kwargs):
        name = self.get_name(**kwargs)
        path = '{}/{}'.format(self.folder,name)
        epoch = int(kwargs.get("nepochs",2))
        fn = "{}/chain_snapshot_epoch_{:06}".format(path,epoch)

        chain, model = self.setup_chain_model(**kwargs)
        S.load_npz(fn, chain)
        return chain, model
Project: deel | Author: uei
def get_model(gpu):
    model = FasterRCNN(gpu)
    model.train = False
    serializers.load_npz('misc/VGG16_faster_rcnn_final.model', model)

    return model
Project: chainer-pix2pix | Author: wuhuikai
def main():
    parser = argparse.ArgumentParser(description='pix2pix --- GAN for Image to Image translation')
    parser.add_argument('--gpu', type=int, default=0, help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--load_size', type=int, default=256, help='Scale image to load_size')
    parser.add_argument('--g_filter_num', type=int, default=64, help="# of filters in G's 1st conv layer")
    parser.add_argument('--d_filter_num', type=int, default=64, help="# of filters in D's 1st conv layer")
    parser.add_argument('--output_channel', type=int, default=3, help='# of output image channels')
    parser.add_argument('--n_layers', type=int, default=3, help='# of hidden layers in D')
    parser.add_argument('--list_path', default='list/val_list.txt', help='Path for test list')
    parser.add_argument('--out', default='result/test', help='Directory to output the result')
    parser.add_argument('--G_path', default='result/G.npz', help='Path for pretrained G')
    args = parser.parse_args()

    if not os.path.isdir(args.out):
        os.makedirs(args.out)

    # Set up GAN G
    G = Generator(args.g_filter_num, args.output_channel)
    serializers.load_npz(args.G_path, G)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        G.to_gpu()                               # Copy the model to the GPU

    with open(args.list_path) as f:
        imgs = f.readlines()

    total = len(imgs)
    for idx, img_path in enumerate(imgs):
        print('{}/{} ...'.format(idx+1, total))

        img_path = img_path.strip().split(' ')[-1]
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)[:, :, ::-1]
        h, w, _ = img.shape
        img = np.asarray(Image.fromarray(img).resize((args.load_size, args.load_size), resample=Image.NEAREST), dtype=np.float32)
        img = np.transpose(img, (2, 0, 1))

        A = data_process([img], device=args.gpu, volatile='on')
        B = np.squeeze(output2img(G(A, test=True, dropout=False)))

        Image.fromarray(B).resize((w, h), resample=Image.BILINEAR).save(os.path.join(args.out, os.path.basename(img_path).replace('gtFine_labelIds', 'leftImg8bit')))
Project: chainer-ADDA | Author: pfnet-research
def main(args):
    # get datasets
    source_train, source_test = chainer.datasets.get_svhn()
    target_train, target_test = chainer.datasets.get_mnist(ndim=3, rgb_format=True)
    source = source_train, source_test

    # resize mnist to 32x32
    def transform(in_data):
        img, label = in_data
        img = resize(img, (32, 32))
        return img, label

    target_train = TransformDataset(target_train, transform)
    target_test = TransformDataset(target_test, transform)

    target = target_train, target_test

    # load pretrained source, or perform pretraining
    pretrained = os.path.join(args.output, args.pretrained_source)
    if not os.path.isfile(pretrained):
        source_cnn = pretrain_source_cnn(source, args)
    else:
        source_cnn = Loss(num_classes=10)
        serializers.load_npz(pretrained, source_cnn)

    # how well does this perform on target domain?
    test_pretrained_on_target(source_cnn, target, args)

    # initialize the target cnn (do not use source_cnn.copy)
    target_cnn = Loss(num_classes=10)
    # copy parameters from source cnn to target cnn
    target_cnn.copyparams(source_cnn)

    train_target_cnn(source, target, source_cnn, target_cnn, args)
Project: chainer_sklearn | Author: corochann
def main():
    parser = argparse.ArgumentParser(description='Regression predict')
    parser.add_argument('--modelpath', '-m', default='result/mlp.model',
                        help='Model path to be loaded')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--unit', '-u', type=int, default=50,
                        help='Number of units')
    args = parser.parse_args()

    batchsize = 128

    # Load dataset
    data, target = load_data()
    X = data.reshape((-1, 1)).astype(np.float32)
    y = target.reshape((-1, 1)).astype(np.float32)

    # Load trained model
    model = SklearnWrapperRegressor(MLP(args.unit, 1), device=args.gpu)
    serializers.load_npz(args.modelpath, model)

    # --- Example 1. Predict all test data ---
    outputs = model.predict(X,
                            batchsize=batchsize,
                            retain_inputs=False,)

    # --- Plot result ---
    plt.figure()
    plt.scatter(X, y, label='actual')
    plt.plot(X, outputs, label='predict', color='red')
    plt.legend()
    plt.show()
    plt.savefig('predict.png')
Project: chainer_frmqn | Author: okdshin
def load(self, name):
        serializers.load_npz(name+".model", self.dqn.model)
        serializers.load_npz(name+".optimizer", self.dqn.optimizer)
Project: chainer-qrnn | Author: butsugiri
def main(args):
    # load config file and obtain embed dimension and hidden dimension
    with open(args.config_path, 'r') as fi:
        config = json.load(fi)
        embed_dim = config["dim"]
        hidden_dim = config["unit"]
        print("Embedding Dimension: {}\nHidden Dimension: {}\n".format(embed_dim, hidden_dim), file=sys.stderr)

    # load data
    dp = DataProcessor(data_path=config["data"], test_run=False)
    dp.prepare_dataset()

    # create model
    vocab = dp.vocab
    model = RecNetClassifier(QRNNLangModel(n_vocab=len(vocab), embed_dim=embed_dim, out_size=hidden_dim))

    # load parameters
    print("loading paramters to model...", end='', file=sys.stderr, flush=True)
    S.load_npz(filename=args.model_path, obj=model)
    print("done.", file=sys.stderr, flush=True)

    # create iterators from loaded data
    bprop_len = config["bproplen"]
    test_data = dp.test_data
    test_iter = ParallelSequentialIterator(test_data, 1, repeat=False, bprop_len=bprop_len)

    # evaluate the model
    print('testing...', end='', file=sys.stderr, flush=True)
    model.predictor.reset_state()
    model.predictor.train = False
    evaluator = extensions.Evaluator(test_iter, model, converter=convert)
    result = evaluator()
    print('done.\n', file=sys.stderr, flush=True)
    print('Perplexity: {}'.format(np.exp(float(result['main/loss']))), end='', file=sys.stderr, flush=True)
Project: cv-api | Author: yasunorikudo
def __init__(self):
        self._model = FastStyleNet()
        serializers.load_npz('composition.model', self._model)
        cuda.get_device(0).use()
        self._model.to_gpu()
Project: machine_learning_in_application | Author: icoxfog417
def load_model(self, model):
        if not os.path.exists(self.model_path):
            raise Exception("model file directory does not exist.")

        suffix = ".model"
        keyword = model.__class__.__name__.lower()
        candidates = []
        for f in os.listdir(self.model_path):
            if keyword in f and f.endswith(suffix):
                candidates.append(f)
        candidates.sort()
        latest = candidates[-1]
        #print("targets {}, pick up {}.".format(candidates, latest))
        model_file = os.path.join(self.model_path, latest)
        serializers.load_npz(model_file, model)
Project: chainer-fast-neuralstyle-video | Author: gafr
def _transform(in_image,loaded,m_path):
    if m_path == 'none':
        return in_image
    if not loaded:
        serializers.load_npz(m_path, model)
        if RUN_ON_GPU:
            cuda.get_device(0).use() #assuming only one core
            model.to_gpu()
        print "loaded"

    xp = np if not RUN_ON_GPU else cuda.cupy

    image = xp.asarray(in_image, dtype=xp.float32).transpose(2, 0, 1)
    image = image.reshape((1,) + image.shape)
    image -= 120

    x = Variable(image)
    y = model(x)

    result = cuda.to_cpu(y.data)
    result = result.transpose(0, 2, 3, 1)
    result = result.reshape((result.shape[1:]))
    result += 120
    result = np.uint8(result)

    return result
Project: chainer-EWC | Author: okdshin
def train_tasks_continuosly(
        args, model, train, test, train2, test2, enable_ewc):
    # Train Task A or load trained model
    if os.path.exists("mlp_taskA.model") or args.skip_taskA:
        print("load taskA model")
        serializers.load_npz("./model50/mlp_taskA.model", model)
    else:
        print("train taskA")
        train_task(args, "train_task_a"+("_with_ewc" if enable_ewc else ""),
                   model, args.epoch, train,
                   {"TaskA": test}, args.batchsize)
        print("save the model")
        serializers.save_npz("mlp_taskA.model", model)

    if enable_ewc:
        print("enable EWC")
        model.compute_fisher(train)
        model.store_variables()

    # Train Task B
    print("train taskB")
    train_task(args, "train_task_ab"+("_with_ewc" if enable_ewc else ""),
               model, args.epoch, train2,
               {"TaskA": test, "TaskB": test2}, args.batchsize)
    print("save the model")
    serializers.save_npz(
            "mlp_taskAB"+("_with_ewc" if enable_ewc else "")+".model", model)
Project: blstm-cws | Author: chantera
def parse(model_file, embed_file):

    # Load files
    Log.i('initialize preprocessor with %s' % embed_file)
    processor = Preprocessor(embed_file)

    Log.v('')
    Log.v("initialize ...")
    Log.v('')

    with np.load(model_file) as f:
        embeddings = np.zeros(f['embed/W'].shape, dtype=np.float32)

    # Set up a neural network
    cls = BLSTMCRF if _use_crf else BLSTM
    model = cls(
        embeddings=embeddings,
        n_labels=4,
        dropout=0.2,
        train=False,
    )
    Log.i("loading a model from %s ..." % model_file)
    serializers.load_npz(model_file, model)

    LABELS = ['B', 'M', 'E', 'S']

    def _process(raw_text):
        if not raw_text:
            return
        xs = [processor.transform_one([c for c in raw_text])]
        ys = model.parse(xs)
        labels = [LABELS[y] for y in ys[0]]
        print(' '.join(labels))
        seq = []
        for c, label in zip(raw_text, labels):
            seq.append(c)
            if label == 'E' or label == 'S':
                seq.append(' ')
        print(''.join(seq))
        print('-')

    print("Input a Chinese sentence! (use 'q' to exit)")
    while True:
        x = input()
        if x == 'q':
            break
        _process(x)
Project: chainer-stack-gan | Author: dsanno
def main():
    args = parse_args()
    gen = net.Generator1()
    dis = net.Discriminator1()
    clip_rect = None
    if args.clip_rect:
        clip_rect = list(map(int, args.clip_rect.split(',')))
        clip_rect = (clip_rect[0], clip_rect[1], clip_rect[0] + clip_rect[2], clip_rect[1] + clip_rect[3])

    device_id = None
    if args.gpu >= 0:
        device_id = args.gpu
        cuda.get_device(device_id).use()
        gen.to_gpu(device_id)
        dis.to_gpu(device_id)

    optimizer_gen = optimizers.Adam(alpha=0.001)
    optimizer_gen.setup(gen)
    optimizer_dis = optimizers.Adam(alpha=0.001)
    optimizer_dis.setup(dis)

    if args.input is not None:
        serializers.load_npz(args.input + '.gen.model', gen)
        serializers.load_npz(args.input + '.gen.state', optimizer_gen)
        serializers.load_npz(args.input + '.dis.model', dis)
        serializers.load_npz(args.input + '.dis.state', optimizer_dis)

    if args.out_image_dir is not None:
        if not os.path.exists(args.out_image_dir):
            try:
                os.mkdir(args.out_image_dir)
            except OSError:
                print('cannot make directory {}'.format(args.out_image_dir))
                exit()
        elif not os.path.isdir(args.out_image_dir):
            print('file path {} exists but is not directory'.format(args.out_image_dir))
            exit()

    with open(args.dataset, 'rb') as f:
        images = pickle.load(f)

    train(gen, dis, optimizer_gen, optimizer_dis, images, args.epoch, batch_size=args.batch_size, margin=args.margin, save_epoch=args.save_epoch, lr_decay=args.lr_decay, output_path=args.output, out_image_dir=args.out_image_dir, clip_rect=clip_rect)
Project: chainer-stack-gan | Author: dsanno
def main():
    args = parse_args()
    gen1 = net.Generator1()
    gen2 = net.Generator2()
    dis = net.Discriminator2()
    clip_rect = None
    if args.clip_rect:
        clip_rect = map(int, args.clip_rect.split(','))
        clip_rect = tuple([clip_rect[0], clip_rect[1], clip_rect[0] + clip_rect[2], clip_rect[1] + clip_rect[3]])

    device_id = None
    if args.gpu >= 0:
        device_id = args.gpu
        cuda.get_device(device_id).use()
        gen1.to_gpu(device_id)
        gen2.to_gpu(device_id)
        dis.to_gpu(device_id)

    optimizer_gen = optimizers.Adam(alpha=0.001)
    optimizer_gen.setup(gen2)
    optimizer_dis = optimizers.Adam(alpha=0.001)
    optimizer_dis.setup(dis)

    serializers.load_npz(args.stack1 + '.gen.model', gen1)
    if args.input is not None:
        serializers.load_npz(args.input + '.gen.model', gen2)
        serializers.load_npz(args.input + '.gen.state', optimizer_gen)
        serializers.load_npz(args.input + '.dis.model', dis)
        serializers.load_npz(args.input + '.dis.state', optimizer_dis)

    if args.out_image_dir is not None:
        if not os.path.exists(args.out_image_dir):
            try:
                os.mkdir(args.out_image_dir)
            except OSError:
                print('cannot make directory {}'.format(args.out_image_dir))
                exit()
        elif not os.path.isdir(args.out_image_dir):
            print('file path {} exists but is not directory'.format(args.out_image_dir))
            exit()

    with open(args.dataset, 'rb') as f:
        images = pickle.load(f)

    train(gen1, gen2, dis, optimizer_gen, optimizer_dis, images, args.epoch, batch_size=args.batch_size, margin=args.margin, save_epoch=args.save_epoch, lr_decay=args.lr_decay, output_path=args.output, out_image_dir=args.out_image_dir, clip_rect=clip_rect)
Project: chainer-deconv | Author: germanRos
def inference():
    cap = cv2.VideoCapture(0)


    # load model
    model = Netmodel('eval-model', CLASSES)
    serializers.load_npz(MODEL_NAME, model)
    cuda.get_device(GPU_ID).use()
    model.to_gpu()

    LUT = fromHEX2RGB(stats_opts['colormap'] )
    fig3, axarr3 = plt.subplots(1, 1)

    batchRGB = np.zeros((1, 3, NEWSIZE[1], NEWSIZE[0]), dtype='float32')

    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()

        # process frame
        im = misc.imresize(frame, NEWSIZE, interp='bilinear')
        # conversion from HxWxCH to CHxWxH
        batchRGB[0, :, :, :] = im.astype(np.float32).transpose((2, 1, 0))
        batchRGBn = batchRGB - 127.0

        # data ready
        batch = chainer.Variable(cuda.cupy.asarray(batchRGBn))

        # make predictions
        model((batch, []), test_mode=2)
        pred = model.probs.data.argmax(1)
        # move data back to CPU
        pred_ = cuda.to_cpu(pred)

        pred_ = LUT[pred_ + 1, :].squeeze()
        pred_ = pred_.transpose((1, 0, 2))
        pred2 = cv2.cvtColor(pred_, cv2.COLOR_BGR2RGB)

        # Display the resulting frame
        cv2.imshow('frame', frame)
        cv2.imshow('pred', pred2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Project: chainer-deconv | Author: germanRos
def inference():
    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
    #cv2.namedWindow('pred', cv2.WINDOW_NORMAL)


    # load model
    model = Netmodel('eval-model', CLASSES)
    serializers.load_npz(MODEL_NAME, model)
    cuda.get_device(GPU_ID).use()
    model.to_gpu()

    LUT = fromHEX2RGB(stats_opts['colormap'] )
    fig3, axarr3 = plt.subplots(1, 1)

    batchRGB = np.zeros((1, 3, NEWSIZE[1], NEWSIZE[0]), dtype='float32')

    # go through the data
    flist = []
    with open(TESTFILE) as f:
        for line in f:
            cline = re.split('\n',line)

            #print(cline[0])
            frame = misc.imread(cline[0])
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # process frame
            im = misc.imresize(frame, NEWSIZE, interp='bilinear')
            # conversion from HxWxCH to CHxWxH
            batchRGB[0,:,:,:] = im.astype(np.float32).transpose((2,1,0))
            batchRGBn = batchRGB  - 127.0

            # data ready
            batch = chainer.Variable(cuda.cupy.asarray(batchRGBn))

            # make predictions
            model((batch, []), test_mode=2)
            pred = model.probs.data.argmax(1)
            # move data back to CPU
            pred_ = cuda.to_cpu(pred)

            pred_ = LUT[pred_+1,:].squeeze()
            pred_ = pred_.transpose((1,0,2))
            pred2 = cv2.cvtColor(pred_, cv2.COLOR_BGR2RGB)

            #ipdb.set_trace()
            disp = (0.4*im + 0.6*pred2).astype(np.uint8)

            # Display the resulting frame
            cv2.imshow('frame', disp)
            # cv2.imshow('pred', pred2)
            if cv2.waitKey(-1) & 0xFF == ord('q'):
                break
    cv2.destroyAllWindows()