Python chainer.training.extensions module: LogReport() usage examples

The following 35 code examples, extracted from open-source Python projects, illustrate how to use chainer.training.extensions.LogReport().
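
Before the project examples, a minimal self-contained sketch may help (the linear model and the 'result' output directory are illustrative choices, not taken from any project below). LogReport collects the values reported during training and writes aggregated entries to a JSON file under the trainer's out directory (named log by default, or whatever is passed as log_name); its trigger, (1, 'epoch') by default, controls how often an entry is written.

import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions

model = L.Classifier(L.Linear(784, 10))  # illustrative model
optimizer = chainer.optimizers.SGD()
optimizer.setup(model)

train, _ = chainer.datasets.get_mnist()
train_iter = chainer.iterators.SerialIterator(train, 100)
updater = training.StandardUpdater(train_iter, optimizer)
trainer = training.Trainer(updater, (1, 'epoch'), out='result')

# Write an aggregated log entry to result/log every 100 iterations.
trainer.extend(extensions.LogReport(trigger=(100, 'iteration')))
trainer.run()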

Project: chainer-examples    Author: nocotan
def main():
    model = L.Classifier(CNN())

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_mnist(ndim=3)
    train_iter = chainer.iterators.SerialIterator(train, batch_size=100)
    test_iter = chainer.iterators.SerialIterator(test, batch_size=100, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (5, 'epoch'), out='result')

    trainer.extend(extensions.Evaluator(test_iter, model))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar())

    trainer.run()
Project: NlpUtil    Author: trtd56
def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):
        if opt_name == "Adam":
            opt = getattr(optimizers, opt_name)()
        else:
            opt = getattr(optimizers, opt_name)(lr)
        opt.setup(self.model)
        opt.add_hook(optimizer.GradientClipping(g_clip))

        updater = training.StandardUpdater(self.train_iter, opt, device=gpu)
        self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=out_dir)
        self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=gpu))
        self.trainer.extend(extensions.dump_graph('main/loss'))
        self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))
        self.trainer.extend(extensions.LogReport())
        self.trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                                   'epoch', file_name='loss.png'))
        self.trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'],
                                                   'epoch', file_name='accuracy.png'))
        self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss',
                                                    'main/accuracy', 'validation/main/accuracy',
                                                    'elapsed_time']))
        self.trainer.extend(extensions.ProgressBar())
Project: chainer-pspnet    Author: mitmul
def __init__(self, **kwargs):
        required_keys = []
        optional_keys = [
            'dump_graph',
            'Evaluator',
            'ExponentialShift',
            'LinearShift',
            'LogReport',
            'observe_lr',
            'observe_value',
            'snapshot',
            'PlotReport',
            'PrintReport',
        ]
        super().__init__(
            required_keys, optional_keys, kwargs, self.__class__.__name__)
Project: chainer-pspnet    Author: mitmul
def __init__(self, **kwargs):
        required_keys = []
        optional_keys = [
            'dump_graph',
            'Evaluator',
            'ExponentialShift',
            'LinearShift',
            'LogReport',
            'observe_lr',
            'observe_value',
            'snapshot',
            'PlotReport',
            'PrintReport',
        ]
        super().__init__(
            required_keys, optional_keys, kwargs, self.__class__.__name__)
Project: instance_normalization_chainer    Author: crcrpar
def main(gpu_id=-1, bs=32, epoch=20, out='./result', resume=''):
    net = ShallowConv()
    model = L.Classifier(net)
    if gpu_id >= 0:
        chainer.cuda.get_device_from_id(gpu_id).use()
        model.to_gpu()
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_mnist(ndim=3)
    train_iter = chainer.iterators.SerialIterator(train, bs)
    test_iter = chainer.iterators.SerialIterator(
        test, bs, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer, device=gpu_id)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out=out)
    trainer.extend(extensions.ParameterStatistics(model.predictor))
    trainer.extend(extensions.Evaluator(test_iter, model, device=gpu_id))
    trainer.extend(extensions.LogReport(log_name='parameter_statistics'))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar())

    if resume:
        chainer.serializers.load_npz(resume, trainer)

    trainer.run()
Project: instance_normalization_chainer    Author: crcrpar
def main(gpu_id=-1, bs=32, epoch=20, out='./not_layer_result', resume=''):
    net = ShallowConv()
    model = L.Classifier(net)
    if gpu_id >= 0:
        chainer.cuda.get_device_from_id(gpu_id).use()
        model.to_gpu()
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_mnist(ndim=3)
    train_iter = chainer.iterators.SerialIterator(train, bs)
    test_iter = chainer.iterators.SerialIterator(test, bs, repeat=False,
                                                 shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer, device=gpu_id)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out=out)
    trainer.extend(extensions.ParameterStatistics(model.predictor))
    trainer.extend(extensions.Evaluator(test_iter, model, device=gpu_id))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar())

    if resume:
        chainer.serializers.load_npz(resume, trainer)

    trainer.run()
Project: signature-embedding    Author: hrantzsch
def get_trainer(updater, evaluator, epochs):
    trainer = training.Trainer(updater, (epochs, 'epoch'), out='result')
    trainer.extend(evaluator)
    # TODO: reduce LR -- how to update every X epochs?
    # trainer.extend(extensions.ExponentialShift('lr', 0.1, target=lr*0.0001))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.ProgressBar(
        (epochs, 'epoch'), update_interval=10))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss']))
    return trainer
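
One way to resolve the TODO above (a sketch, not part of the original project) is to attach ExponentialShift with an interval trigger; each time the trigger fires, the named optimizer attribute is multiplied by the given rate. This assumes an optimizer that exposes 'lr', such as MomentumSGD; Adam uses 'alpha' instead.

    # Hypothetical addition: multiply lr by 0.1 once every 10 epochs.
    trainer.extend(extensions.ExponentialShift('lr', 0.1), trigger=(10, 'epoch'))
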
Project: depccg    Author: masashi-y
def train(args):
    model = EmbeddingTagger(args.model, 50, 20, 30)
    model.setup_training(args.embed)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    train = CCGBankDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = CCGBankDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.SGD(lr=0.01)
    optimizer.setup(model)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 5000, 'iteration'
    log_interval = 200, 'iteration'
    val_model = model.copy()

    trainer.extend(extensions.Evaluator(val_iter, val_model), trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: chainer-wasserstein-gan    Author: hvy
def train(args):
    nz = args.nz
    batch_size = args.batch_size
    epochs = args.epochs
    gpu = args.gpu

    # CIFAR-10 images in range [-1, 1] (tanh generator outputs)
    train, _ = datasets.get_cifar10(withlabel=False, ndim=3, scale=2)
    train -= 1.0
    train_iter = iterators.SerialIterator(train, batch_size)

    z_iter = RandomNoiseIterator(GaussianNoiseGenerator(0, 1, args.nz),
                                 batch_size)

    optimizer_generator = optimizers.RMSprop(lr=0.00005)
    optimizer_critic = optimizers.RMSprop(lr=0.00005)
    optimizer_generator.setup(Generator())
    optimizer_critic.setup(Critic())

    updater = WassersteinGANUpdater(
        iterator=train_iter,
        noise_iterator=z_iter,
        optimizer_generator=optimizer_generator,
        optimizer_critic=optimizer_critic,
        device=gpu)

    trainer = training.Trainer(updater, stop_trigger=(epochs, 'epoch'))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.LogReport(trigger=(1, 'iteration')))
    trainer.extend(GeneratorSample(), trigger=(1, 'epoch'))
    trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'critic/loss',
            'critic/loss/real', 'critic/loss/fake', 'generator/loss']))
    trainer.run()
Project: chainer-examples    Author: nocotan
def main():
    unit = 1000
    batchsize = 100
    epoch = 20

    model = L.Classifier(MLP(unit, 10))

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_mnist()
    train_iter = chainer.iterators.SerialIterator(train, batchsize)
    test_iter = chainer.iterators.SerialIterator(test, batchsize, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out='result')

    trainer.extend(extensions.Evaluator(test_iter, model))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=(epoch, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar())

    trainer.run()
Project: vfm    Author: cemoody
def fit(model, train, valid, device=-1, batchsize=4096, n_epoch=500,
        resume=None, alpha=1e-3):
    if device >= 0:
        chainer.cuda.get_device(device).use()
        model.to_gpu(device)
    optimizer = chainer.optimizers.Adam(alpha)
    optimizer.setup(model)

    # Setup iterators
    train_iter = chainer.iterators.SerialIterator(train, batchsize)
    valid_iter = chainer.iterators.SerialIterator(valid, batchsize,
                                                  repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'),
                               out='out_' + str(device))

    # Setup logging, printing & saving
    keys = ['loss', 'rmse', 'bias', 'kld0', 'kld1']
    keys += ['kldg', 'kldi', 'hypg', 'hypi']
    keys += ['hypglv', 'hypilv']
    reports = ['epoch']
    reports += ['main/' + key for key in keys]
    reports += ['validation/main/rmse']
    trainer.extend(TestModeEvaluator(valid_iter, model, device=device))
    trainer.extend(extensions.Evaluator(valid_iter, model, device=device))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
    trainer.extend(extensions.PrintReport(reports))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    # If previous model detected, resume
    if resume:
        print("Loading from {}".format(resume))
        chainer.serializers.load_npz(resume, trainer)

    # Run the model
    trainer.run()
Project: chainer-ADDA    Author: pfnet-research
def pretrain_source_cnn(data, args, epochs=1000):
    print(":: pretraining source encoder")
    source_cnn = Loss(num_classes=10)
    if args.device >= 0:
        source_cnn.to_gpu()

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(source_cnn)

    train_iterator, test_iterator = data2iterator(data, args.batchsize, multiprocess=False)

    # train_iterator = chainer.iterators.MultiprocessIterator(data, args.batchsize, n_processes=4)

    updater = chainer.training.StandardUpdater(iterator=train_iterator, optimizer=optimizer, device=args.device)
    trainer = chainer.training.Trainer(updater, (epochs, 'epoch'), out=args.output)

    # learning rate decay
    # trainer.extend(extensions.ExponentialShift("alpha", rate=0.9, init=args.learning_rate, target=args.learning_rate*10E-5))

    trainer.extend(extensions.Evaluator(test_iterator, source_cnn, device=args.device))
    # trainer.extend(extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'), trigger=(10, "epoch"))
    trainer.extend(extensions.snapshot_object(optimizer.target, "source_model_epoch_{.updater.epoch}"), trigger=(epochs, "epoch"))

    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(extensions.LogReport(trigger=(1, "epoch")))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

    trainer.run()

    return source_cnn
Project: chainer-ADDA    Author: pfnet-research
def train_target_cnn(source, target, source_cnn, target_cnn, args, epochs=10000):
    print(":: training encoder with target domain")
    discriminator = Discriminator()

    if args.device >= 0:
        source_cnn.to_gpu()
        target_cnn.to_gpu()
        discriminator.to_gpu()

    # target_optimizer = chainer.optimizers.Adam(alpha=1.0E-5, beta1=0.5)
    target_optimizer = chainer.optimizers.RMSprop(lr=args.lr)
    # target_optimizer = chainer.optimizers.MomentumSGD(lr=1.0E-4, momentum=0.99)
    target_optimizer.setup(target_cnn.encoder)
    target_optimizer.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))

    # discriminator_optimizer = chainer.optimizers.Adam(alpha=1.0E-5, beta1=0.5)
    discriminator_optimizer = chainer.optimizers.RMSprop(lr=args.lr)
    # discriminator_optimizer = chainer.optimizers.MomentumSGD(lr=1.0E-4, momentum=0.99)
    discriminator_optimizer.setup(discriminator)
    discriminator_optimizer.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))

    source_train_iterator, source_test_iterator = data2iterator(source, args.batchsize, multiprocess=False)
    target_train_iterator, target_test_iterator = data2iterator(target, args.batchsize, multiprocess=False)

    updater = ADDAUpdater(source_train_iterator, target_train_iterator, source_cnn, target_optimizer, discriminator_optimizer, args)

    trainer = chainer.training.Trainer(updater, (epochs, 'epoch'), out=args.output)

    trainer.extend(extensions.Evaluator(target_test_iterator, target_cnn, device=args.device))
    # trainer.extend(extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'), trigger=(10, "epoch"))
    trainer.extend(extensions.snapshot_object(target_cnn, "target_model_epoch_{.updater.epoch}"), trigger=(epochs, "epoch"))

    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(extensions.LogReport(trigger=(1, "epoch")))
    trainer.extend(extensions.PrintReport(
        ["epoch", "loss/discrim", "loss/encoder",
         "validation/main/loss", "validation/main/accuracy", "elapsed_time"]))

    trainer.run()
Project: chainer-graph-cnn    Author: pfnet-research
def check_train(self, gpu):
        outdir = tempfile.mkdtemp()
        print("outdir: {}".format(outdir))

        n_classes = 2
        batch_size = 32

        devices = {'main': gpu}

        A = np.array([
            [0, 1, 1, 0],
            [1, 0, 0, 1],
            [1, 0, 0, 0],
            [0, 1, 0, 0],
        ]).astype(np.float32)
        model = graph_cnn.GraphCNN(A, n_out=n_classes)

        optimizer = optimizers.Adam(alpha=1e-4)
        optimizer.setup(model)
        train_dataset = EasyDataset(train=True, n_classes=n_classes)
        train_iter = chainer.iterators.MultiprocessIterator(
            train_dataset, batch_size)
        updater = ParallelUpdater(train_iter, optimizer, devices=devices)
        trainer = chainer.training.Trainer(updater, (10, 'epoch'), out=outdir)
        trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
        trainer.extend(extensions.PrintReport(
            ['epoch', 'iteration', 'main/loss', 'main/accuracy']))
        trainer.extend(extensions.ProgressBar())
        trainer.run()
Project: chainer_sklearn    Author: corochann
def fit(self, X, y=None, **kwargs):
        """If hyper parameters are set to None, then instance's variable is used,
        this functionality is used Grid search with `set_params` method.
        Also if instance's variable is not set, _default_hyperparam is used. 

        Usage: model.fit(train_dataset) or model.fit(X, y)

        Args:
            train: training dataset, assumes chainer's dataset class 
            test: test dataset for evaluation, assumes chainer's dataset class
            batchsize: batchsize for both training and evaluation
            iterator_class: iterator class used for this training, 
                            currently assumes SerialIterator or MultiProcessIterator
            optimizer: optimizer instance to update parameter
            epoch: training epoch
            out: directory path to save the result
            snapshot_frequency (int): snapshot frequency in epoch. 
                                Negative value indicates not to take snapshot.
            dump_graph: Save computational graph info or not, default is False.
            log_report: Enable LogReport or not
            plot_report: Enable PlotReport or not
            print_report: Enable PrintReport or not
            progress_report: Enable ProgressReport or not
            resume: specify trainer saved path to resume training.

        """
        kwargs = self.filter_sk_params(self.fit_core, kwargs)
        return self.fit_core(X, y, **kwargs)
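
A hedged usage sketch of the interface documented above (dataset names and values are placeholders; only the keyword names listed in the docstring are assumed):

    # Hypothetical call; train_dataset and test_dataset are placeholders.
    model.fit(train_dataset,
              test=test_dataset,
              batchsize=128,
              epoch=20,
              out='result',
              snapshot_frequency=-1,  # negative value: take no snapshot, per the docstring
              log_report=True,
              print_report=True,
              progress_report=False)
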
Project: chainer-EWC    Author: okdshin
def train_task(args, train_name, model, epoch_num,
               train_dataset, test_dataset_dict, batch_size):
    optimizer = optimizers.SGD()
    optimizer.setup(model)

    train_iter = iterators.SerialIterator(train_dataset, batch_size)
    test_iter_dict = {name: iterators.SerialIterator(
            test_dataset, batch_size, repeat=False, shuffle=False)
            for name, test_dataset in test_dataset_dict.items()}

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (epoch_num, 'epoch'), out=args.out)
    for name, test_iter in test_iter_dict.items():
        trainer.extend(extensions.Evaluator(test_iter, model), name)
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss'] +
        [test+'/main/loss' for test in test_dataset_dict.keys()] +
        ['main/accuracy'] +
        [test+'/main/accuracy' for test in test_dataset_dict.keys()]))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.PlotReport(
        [test+"/main/accuracy" for test
         in test_dataset_dict.keys()],
        file_name=train_name+".png"))
    trainer.run()
Project: face-classifier-cnn    Author: nknytk
def main(config_file):
    with open(config_file) as fp:
        conf = json.load(fp)
    fe_conf = conf['feature_extractor']
    cl_conf = conf['classifier']

    fe_class = getattr(cnn_feature_extractors, fe_conf['model'])
    feature_extractor = fe_class(n_classes=fe_conf['n_classes'], n_base_units=fe_conf['n_base_units'])
    chainer.serializers.load_npz(fe_conf['out_file'], feature_extractor)

    model = classifiers.MLPClassifier(cl_conf['n_classes'], feature_extractor)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    device = cl_conf.get('device', -1)
    train_dataset = feature_dataset(os.path.join(cl_conf['dataset_path'], 'train'), model)
    train_iter = chainer.iterators.SerialIterator(train_dataset, conf.get('batch_size', 1))
    updater = chainer.training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = chainer.training.Trainer(updater, (cl_conf['epoch'], 'epoch'), out='out_re')

    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.ProgressBar(update_interval=10))

    test_dataset_path = os.path.join(cl_conf['dataset_path'], 'test')
    if os.path.exists(test_dataset_path):
        test_dataset = feature_dataset(test_dataset_path, model)
        test_iter = chainer.iterators.SerialIterator(test_dataset, 10, repeat=False, shuffle=False)
        trainer.extend(extensions.Evaluator(test_iter, model, device=device))
        trainer.extend(extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy'
        ]))
    else:
        trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy']))

    trainer.run()

    chainer.serializers.save_npz(cl_conf['out_file'], model)
Project: face-classifier-cnn    Author: nknytk
def main(config_file):
    with open(config_file) as fp:
        conf = json.load(fp)['feature_extractor']

    model_class = getattr(cnn_feature_extractors, conf['model'])
    model = model_class(conf['n_classes'], conf['n_base_units'])

    resume_file = conf['out_file'] + '.to_resume'
    if os.path.exists(resume_file):
        chainer.serializers.load_npz(resume_file, model)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    device = conf.get('device', -1)
    train_dataset = create_dataset(os.path.join(conf['dataset_path'], 'train'))
    train_iter = chainer.iterators.SerialIterator(train_dataset, conf.get('batch_size', 10))
    updater = chainer.training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = chainer.training.Trainer(updater, (conf['epoch'], 'epoch'), out='out')

    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.ProgressBar(update_interval=10))

    test_dataset_path = os.path.join(conf['dataset_path'], 'test')
    if os.path.exists(test_dataset_path):
        test_dataset = create_dataset(test_dataset_path)
        test_iter = chainer.iterators.SerialIterator(test_dataset, 20, repeat=False, shuffle=False)
        trainer.extend(extensions.Evaluator(test_iter, model, device=device))
        trainer.extend(extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy'
        ]))
    else:
        trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy']))

    trainer.run()

    model = model.to_cpu()
    chainer.serializers.save_npz(conf['out_file'], model)
Project: char-rnn-text-generation    Author: yxtay
def train_main(args):
    """
    Trains the model specified in args.
    Main method for the train subcommand.
    """
    # load text
    with open(args.text_path) as f:
        text = f.read()
    logger.info("corpus length: %s.", len(text))

    # data iterator
    data_iter = DataIterator(text, args.batch_size, args.seq_len)

    # load or build model
    if args.restore:
        logger.info("restoring model.")
        load_path = args.checkpoint_path if args.restore is True else args.restore
        model = load_model(load_path)
    else:
        net = Network(vocab_size=VOCAB_SIZE,
                      embedding_size=args.embedding_size,
                      rnn_size=args.rnn_size,
                      num_layers=args.num_layers,
                      drop_rate=args.drop_rate)
        model = L.Classifier(net)

    # make checkpoint directory
    log_dir = make_dirs(args.checkpoint_path)
    with open("{}.json".format(args.checkpoint_path), "w") as f:
        json.dump(model.predictor.args, f, indent=2)
    chainer.serializers.save_npz(args.checkpoint_path, model)
    logger.info("model saved: %s.", args.checkpoint_path)

    # optimizer
    optimizer = chainer.optimizers.Adam(alpha=args.learning_rate)
    optimizer.setup(model)
    # clip gradient norm
    optimizer.add_hook(chainer.optimizer.GradientClipping(args.clip_norm))

    # trainer
    updater = BpttUpdater(data_iter, optimizer)
    trainer = chainer.training.Trainer(updater, (args.num_epochs, 'epoch'), out=log_dir)
    trainer.extend(extensions.snapshot_object(model, filename=os.path.basename(args.checkpoint_path)))
    trainer.extend(extensions.ProgressBar(update_interval=1))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PlotReport(y_keys=["main/loss"]))
    trainer.extend(LoggerExtension(text))

    # training start
    model.predictor.reset_state()
    logger.info("start of training.")
    time_train = time.time()
    trainer.run()

    # training end
    duration_train = time.time() - time_train
    logger.info("end of training, duration: %ds.", duration_train)
    # generate text
    seed = generate_seed(text)
    generate_text(model, seed, 1024, 3)
    return model
Project: Comicolorization    Author: DwangoMediaVillage
def create_trainer(
        config,
        project_path,
        updater,
        model,
        eval_func,
        iterator_test,
        iterator_train_validation,
        loss_names,
        converter=chainer.dataset.convert.concat_examples,
):
    # type: (TrainConfig, str, any, typing.Dict, any, any, any, any, any) -> any
    def _make_evaluator(iterator):
        return utility.chainer_utility.NoVariableEvaluator(
            iterator,
            target=model,
            converter=converter,
            eval_func=eval_func,
            device=config.gpu,
        )

    trainer = chainer.training.Trainer(updater, out=project_path)

    log_trigger = (config.log_iteration, 'iteration')
    save_trigger = (config.save_iteration, 'iteration')

    eval_test_name = 'eval/test'
    eval_train_name = 'eval/train'

    snapshot = extensions.snapshot_object(model['main'], '{.updater.iteration}.model')
    trainer.extend(snapshot, trigger=save_trigger)

    trainer.extend(extensions.dump_graph('main/' + loss_names[0], out_name='main.dot'))

    trainer.extend(_make_evaluator(iterator_test), name=eval_test_name, trigger=log_trigger)
    trainer.extend(_make_evaluator(iterator_train_validation), name=eval_train_name, trigger=log_trigger)

    report_target = []
    for evaluator_name in ['', eval_test_name + '/', eval_train_name + '/']:
        for model_name in ['main/']:
            for loss_name in loss_names:
                report_target.append(evaluator_name + model_name + loss_name)

    trainer.extend(extensions.LogReport(trigger=log_trigger, log_name="log.txt"))
    trainer.extend(extensions.PrintReport(report_target))

    return trainer
Project: depccg    Author: masashi-y
def train(args):
    model = LSTMParser(args.model, args.word_emb_size, args.afix_emb_size, args.nlayers,
            args.hidden_dim, args.elu_dim, args.dep_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f: log(args, f)

    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    train = LSTMParserDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMParserDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.Adam(beta2=0.9)
    # optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
            device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(val_iter, eval_model,
                    converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration',
        'main/tagging_accuracy', 'main/tagging_loss',
        'main/parsing_accuracy', 'main/parsing_loss',
        'validation/main/tagging_accuracy',
        'validation/main/parsing_accuracy'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: depccg    Author: masashi-y
def train(args):
    model = LSTMTagger(args.model, args.word_emb_size, args.afix_emb_size,
            args.nlayers, args.hidden_dim, args.relu_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f:
        log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    train = LSTMTaggerDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
            device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 2000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: depccg    Author: masashi-y
def train(args):
    model = BiaffineJaLSTMParser(args.model, args.word_emb_size, args.char_emb_size,
            args.nlayers, args.hidden_dim, args.dep_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f: log(args, f)

    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    train = LSTMParserDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMParserDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.Adam(beta2=0.9)
    # optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(2e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
            device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.ExponentialShift(
                    "eps", .75, 2e-3), trigger=(2500, 'iteration'))
    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration',
        'main/tagging_accuracy', 'main/tagging_loss',
        'main/parsing_accuracy', 'main/parsing_loss',
        'validation/main/tagging_accuracy',
        'validation/main/parsing_accuracy'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: depccg    Author: masashi-y
def train(args):
    model = JaLSTMParser(args.model, args.word_emb_size, args.char_emb_size,
            args.nlayers, args.hidden_dim, args.relu_dim, args.dep_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f: log(args, f)

    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    train = LSTMParserDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMParserDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.Adam(beta2=0.9)
    # optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration',
        'main/tagging_accuracy', 'main/tagging_loss',
        'main/parsing_accuracy', 'main/parsing_loss',
        'validation/main/tagging_loss', 'validation/main/tagging_accuracy',
        'validation/main/parsing_loss', 'validation/main/parsing_accuracy'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: depccg    Author: masashi-y
def train(args):
    model = LSTMTagger(args.model, args.word_emb_size, args.char_emb_size,
            args.nlayers, args.hidden_dim, args.relu_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f:
        log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    train = LSTMTaggerDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: depccg    Author: masashi-y
def train(args):
    model = LSTMTagger(args.model, args.word_emb_size, args.afix_emb_size,
            args.nlayers, args.hidden_dim, args.relu_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f:
        log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    train = LSTMTaggerDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
            device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 2000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: depccg    Author: masashi-y
def train(args):
    model = LSTMParser(args.model, args.word_emb_size, args.afix_emb_size, args.nlayers,
            args.hidden_dim, args.elu_dim, args.dep_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f: log(args, f)

    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    train = LSTMParserDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = LSTMParserDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.Adam(beta2=0.9)
    # optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    # optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
            device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(val_iter, eval_model,
                    converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration',
        'main/tagging_accuracy', 'main/tagging_loss',
        'main/parsing_accuracy', 'main/parsing_loss',
        'validation/main/tagging_accuracy',
        'validation/main/parsing_accuracy'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: depccg    Author: masashi-y
def train(args):
    model = JaCCGEmbeddingTagger(args.model,
                args.word_emb_size, args.char_emb_size)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    train = JaCCGTaggerDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = JaCCGTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.AdaGrad()
    optimizer.setup(model)
    # optimizer.add_hook(WeightDecay(1e-8))
    my_converter = lambda x, dev: convert.concat_examples(x, dev, (None, -1, None, None))
    updater = training.StandardUpdater(train_iter, optimizer, converter=my_converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, my_converter), trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: depccg    Author: masashi-y
def train(args):
    model = PeepHoleLSTMTagger(args.model, args.word_emb_size, args.afix_emb_size,
            args.nlayers, args.hidden_dim, args.relu_dim, args.dropout_ratio)
    with open(args.model + "/params", "w") as f:
        log(args, f)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    if args.pretrained:
        print('Load pretrained word embeddings from', args.pretrained)
        model.load_pretrained_embeddings(args.pretrained)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    converter = lambda x, device: \
            concat_examples(x, device=device, padding=-1)

    train = LSTMTaggerDataset(args.model, args.train)
    train_iter = SerialIterator(train, args.batchsize)
    val = LSTMTaggerDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.MomentumSGD(momentum=0.7)
    optimizer.setup(model)
    optimizer.add_hook(WeightDecay(1e-6))
    optimizer.add_hook(GradientClipping(5.))
    updater = training.StandardUpdater(train_iter, optimizer,
            device=args.gpu, converter=converter)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 1000, 'iteration'
    log_interval = 200, 'iteration'

    eval_model = model.copy()
    eval_model.train = False

    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, converter, device=args.gpu), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: DeepPoseComparison    Author: ynaka81
def start(self):
        """ Train pose net. """
        # set random seed.
        if self.seed is not None:
            random.seed(self.seed)
            np.random.seed(self.seed)
            if self.gpu >= 0:
                chainer.cuda.cupy.random.seed(self.seed)
        # initialize model to train.
        model = AlexNet(self.Nj, self.use_visibility)
        if self.resume_model:
            serializers.load_npz(self.resume_model, model)
        # prepare gpu.
        if self.gpu >= 0:
            chainer.cuda.get_device(self.gpu).use()
            model.to_gpu()
        # load the datasets.
        train = PoseDataset(self.train, data_augmentation=self.data_augmentation)
        val = PoseDataset(self.val, data_augmentation=False)
        # training/validation iterators.
        train_iter = chainer.iterators.MultiprocessIterator(
            train, self.batchsize)
        val_iter = chainer.iterators.MultiprocessIterator(
            val, self.batchsize, repeat=False, shuffle=False)
        # set up an optimizer.
        optimizer = self._get_optimizer()
        optimizer.setup(model)
        if self.resume_opt:
            chainer.serializers.load_npz(self.resume_opt, optimizer)
        # set up a trainer.
        updater = training.StandardUpdater(train_iter, optimizer, device=self.gpu)
        trainer = training.Trainer(
            updater, (self.epoch, 'epoch'), os.path.join(self.out, 'chainer'))
        # standard trainer settings
        trainer.extend(extensions.dump_graph('main/loss'))
        val_interval = (10, 'epoch')
        trainer.extend(TestModeEvaluator(val_iter, model, device=self.gpu), trigger=val_interval)
        # save parameters and optimization state per validation step
        resume_interval = (self.epoch/10, 'epoch')
        trainer.extend(extensions.snapshot_object(
            model, "epoch-{.updater.epoch}.model"), trigger=resume_interval)
        trainer.extend(extensions.snapshot_object(
            optimizer, "epoch-{.updater.epoch}.state"), trigger=resume_interval)
        trainer.extend(extensions.snapshot(
            filename="epoch-{.updater.epoch}.iter"), trigger=resume_interval)
        # show log
        log_interval = (10, "iteration")
        trainer.extend(extensions.LogReport(trigger=log_interval))
        trainer.extend(extensions.observe_lr(), trigger=log_interval)
        trainer.extend(extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss', 'lr']), trigger=log_interval)
        trainer.extend(extensions.ProgressBar(update_interval=10))
        # start training
        if self.resume:
            chainer.serializers.load_npz(self.resume, trainer)
        trainer.run()
Project: vsmlib    Author: undertherain
def train(args):
    time_start = timer()
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        cuda.check_cuda_available()

    if args.path_vocab == '':
        vocab = create_from_dir(args.path_corpus)
    else:
        vocab = Vocabulary()
        vocab.load(args.path_vocab)
        logger.info("loaded vocabulary")

    # For deps or NER context representations, we need a separate context
    # vocab for the NS or HSM loss function.
    if args.context_representation != 'word':
        vocab_context = create_from_annotated_dir(args.path_corpus, representation=args.context_representation)
    else:
        vocab_context = vocab

    loss_func = get_loss_func(args, vocab_context)
    model = get_model(args, loss_func, vocab)

    if args.gpu >= 0:
        model.to_gpu()
        logger.debug("model sent to gpu")

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    if os.path.isfile(args.path_corpus):
        train, val = get_data(args.path_corpus, vocab)
        if args.test:
            train = train[:100]
            val = val[:100]
        train_iter = WindowIterator(train, args.window, args.batchsize)
        val_iter = WindowIterator(val, args.window, args.batchsize, repeat=False)
    else:
        train_iter = DirWindowIterator(path=args.path_corpus, vocab=vocab, window_size=args.window, batch_size=args.batchsize)
    updater = training.StandardUpdater(train_iter, optimizer, converter=convert, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.path_out)

    if os.path.isfile(args.path_corpus):
        trainer.extend(extensions.Evaluator(val_iter, model, converter=convert, device=args.gpu))
    trainer.extend(extensions.LogReport())
    if os.path.isfile(args.path_corpus):
        trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss', 'elapsed_time']))
    else:
        trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'elapsed_time']))
    # trainer.extend(extensions.ProgressBar())
    trainer.run()
    model = create_model(args, model, vocab)
    time_end = timer()
    model.metadata["execution_time"] = time_end - time_start
    return model
Project: vsmlib    Author: undertherain
def main(options):

    #load the config params
    gpu = options['gpu']
    data_path = options['path_dataset']
    embeddings_path = options['path_vectors']
    n_epoch = options['epochs']
    batch_size = options['batchsize']
    test = options['test']
    embed_dim = options['embed_dim']
    freeze = options['freeze_embeddings']
    distance_embed_dim = options['distance_embed_dim']

    #load the data
    data_processor = DataProcessor(data_path)
    data_processor.prepare_dataset()
    train_data = data_processor.train_data
    test_data = data_processor.test_data

    vocab = data_processor.vocab
    cnn = CNN(n_vocab=len(vocab), input_channel=1,
              output_channel=100,
              n_label=19,
              embed_dim=embed_dim, position_dims=distance_embed_dim, freeze=freeze)
    cnn.load_embeddings(embeddings_path, data_processor.vocab)
    model = L.Classifier(cnn)

    #use GPU if flag is set
    if gpu >= 0:
        model.to_gpu()

    #setup the optimizer
    optimizer = O.Adam()
    optimizer.setup(model)

    train_iter = chainer.iterators.SerialIterator(train_data, batch_size)
    test_iter = chainer.iterators.SerialIterator(test_data, batch_size, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer, converter=convert.concat_examples, device=gpu)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'))

    # Evaluation
    test_model = model.copy()
    test_model.predictor.train = False
    trainer.extend(extensions.Evaluator(test_iter, test_model, device=gpu, converter=convert.concat_examples))

    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: vsmlib    Author: undertherain
def main(options):

    #load the config params
    gpu = options['gpu']
    data_path = options['path_dataset']
    embeddings_path = options['path_vectors']
    n_epoch = options['epochs']
    batchsize = options['batchsize']
    test = options['test']
    embed_dim = options['embed_dim']
    freeze = options['freeze_embeddings']

    #load the data
    data_processor = DataProcessor(data_path, test)
    data_processor.prepare_dataset()
    train_data = data_processor.train_data
    dev_data = data_processor.dev_data
    test_data = data_processor.test_data

    vocab = data_processor.vocab
    cnn = CNN(n_vocab=len(vocab), input_channel=1,
              output_channel=10, n_label=2, embed_dim=embed_dim, freeze=freeze)
    cnn.load_embeddings(embeddings_path, data_processor.vocab)
    model = L.Classifier(cnn)
    if gpu >= 0:
        model.to_gpu()

    #setup the optimizer
    optimizer = O.Adam()
    optimizer.setup(model)

    train_iter = chainer.iterators.SerialIterator(train_data, batchsize)
    dev_iter = chainer.iterators.SerialIterator(dev_data, batchsize, repeat=False, shuffle=False)
    test_iter = chainer.iterators.SerialIterator(test_data, batchsize, repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer, converter=util.concat_examples, device=gpu)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'))

    # Evaluation
    eval_model = model.copy()
    eval_model.predictor.train = False
    trainer.extend(extensions.Evaluator(dev_iter, eval_model, device=gpu, converter=util.concat_examples))

    test_model = model.copy()
    test_model.predictor.train = False

    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Project: ddnn    Author: kunglab
def __init__(self, folder, chain, train, test, batchsize=500, resume=True, gpu=0, nepoch=1, reports=[]):
        self.reports = reports
        self.nepoch = nepoch
        self.folder = folder
        self.chain = chain
        self.gpu = gpu

        if self.gpu >= 0:
            chainer.cuda.get_device(gpu).use()
            chain.to_gpu(gpu)
        self.eval_chain = eval_chain = chain.copy()
        self.chain.test = False
        self.eval_chain.test = True
        self.testset = test

        if not os.path.exists(folder):
            os.makedirs(folder)

        train_iter = chainer.iterators.SerialIterator(train, batchsize, shuffle=True)
        test_iter = chainer.iterators.SerialIterator(test, batchsize,
                                                     repeat=False, shuffle=False)

        updater = training.StandardUpdater(train_iter, chain.optimizer, device=gpu)
        trainer = training.Trainer(updater, (nepoch, 'epoch'), out=folder)
        # trainer.extend(TrainingModeSwitch(chain))
        trainer.extend(extensions.dump_graph('main/loss'))
        trainer.extend(extensions.Evaluator(test_iter, eval_chain, device=gpu), trigger=(1,'epoch'))
        trainer.extend(extensions.snapshot_object(
            chain, 'chain_snapshot_epoch_{.updater.epoch:06}'), trigger=(1,'epoch'))
        trainer.extend(extensions.snapshot(
            filename='snapshot_epoch_{.updater.epoch:06}'), trigger=(1,'epoch'))
        trainer.extend(extensions.LogReport(trigger=(1,'epoch')), trigger=(1,'iteration'))
        trainer.extend(extensions.PrintReport(
            ['epoch']+reports), trigger=IntervalTrigger(1,'epoch'))

        self.trainer = trainer

        if resume:
            #if resumeFrom is not None:
            #    trainerFile = os.path.join(resumeFrom[0],'snapshot_epoch_{:06}'.format(resumeFrom[1]))
            #    S.load_npz(trainerFile, trainer)
            i = 1
            trainerFile = os.path.join(folder,'snapshot_epoch_{:06}'.format(i))
            while i <= nepoch and os.path.isfile(trainerFile):
                i = i + 1
                trainerFile = os.path.join(folder,'snapshot_epoch_{:06}'.format(i))
            i = i - 1
            trainerFile = os.path.join(folder,'snapshot_epoch_{:06}'.format(i))
            if i >= 0 and os.path.isfile(trainerFile):
                S.load_npz(trainerFile, trainer)
Project: paint_transfer_c92    Author: Hiroshiba
def create_trainer(
        config: TrainConfig,
        project_path: str,
        updater,
        model: typing.Dict,
        eval_func,
        iterator_test,
        iterator_train_eval,
        loss_names,
        converter=chainer.dataset.convert.concat_examples,
        log_name='log.txt',
):
    trainer = chainer.training.Trainer(updater, out=project_path)

    log_trigger = (config.log_iteration, 'iteration')
    save_trigger = (config.save_iteration, 'iteration')

    eval_test_name = 'eval/test'
    eval_train_name = 'eval/train'

    snapshot = extensions.snapshot_object(model['encoder'], 'encoder{.updater.iteration}.model')
    trainer.extend(snapshot, trigger=save_trigger)
    snapshot = extensions.snapshot_object(model['generator'], 'generator{.updater.iteration}.model')
    trainer.extend(snapshot, trigger=save_trigger)
    snapshot = extensions.snapshot_object(model['mismatch_discriminator'], 'mismatch_discriminator{.updater.iteration}.model')
    trainer.extend(snapshot, trigger=save_trigger)

    trainer.extend(utility.chainer.dump_graph([
        'encoder/' + loss_names[0],
        'generator/' + loss_names[0],
        'mismatch_discriminator/' + loss_names[0],
    ], out_name='main.dot'))

    def _make_evaluator(iterator):
        return utility.chainer.NoVariableEvaluator(
            iterator,
            target=model,
            converter=converter,
            eval_func=eval_func,
            device=config.gpu,
        )

    trainer.extend(_make_evaluator(iterator_test), name=eval_test_name, trigger=log_trigger)
    trainer.extend(_make_evaluator(iterator_train_eval), name=eval_train_name, trigger=log_trigger)

    report_target = []
    for evaluator_name in ['', eval_test_name + '/', eval_train_name + '/']:
        for model_name in [s + '/' for s in model.keys()]:
            for loss_name in set(loss_names):
                report_target.append(evaluator_name + model_name + loss_name)

    trainer.extend(extensions.LogReport(trigger=log_trigger, log_name=log_name))
    trainer.extend(extensions.PrintReport(report_target))

    return trainer