Python chainer.optimizers module: SGD usage examples

The following 28 code examples, extracted from open-source Python projects, illustrate how to use chainer.optimizers.SGD.
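Before the project snippets, here is a minimal, self-contained sketch of the basic SGD workflow in Chainer (the toy linear model and the random data are invented for illustration):

import numpy as np
import chainer.links as L
from chainer import optimizers

model = L.Classifier(L.Linear(10, 3))  # toy model: one linear layer + softmax loss

optimizer = optimizers.SGD(lr=0.01)    # plain stochastic gradient descent
optimizer.setup(model)                 # attach the optimizer to the model's parameters

x = np.random.randn(8, 10).astype(np.float32)
t = np.random.randint(0, 3, size=8).astype(np.int32)

model.cleargrads()   # zero any stale gradients
loss = model(x, t)   # forward pass; Classifier returns the loss
loss.backward()      # backpropagate
optimizer.update()   # apply one SGD step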

Project: chainer-speech-recognition    Author: musyoku
def decay_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.MomentumSGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return final_value
        opt.alpha *= factor
        return
    raise NotImplementedError()
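A sketch of how such a helper is typically driven from a training loop (the epoch loop, the step function, and the constants are invented for illustration):

for epoch in range(50):
    run_one_epoch(model, opt)  # hypothetical per-epoch training step
    decay_learning_rate(opt, 0.98, final_value=1e-5)  # geometric decay with a floor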
Project: chainer-qrnn    Author: musyoku
def decay_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.MomentumSGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return final_value
        opt.alpha *= factor
        return
    raise NotImplementedError()
Project: ddnn    Author: kunglab
def get_optimizer(self, name, lr, momentum=0.9):
        if name.lower() == "adam":
            return optimizers.Adam(alpha=lr, beta1=momentum)
        if name.lower() == "smorms3":
            return optimizers.SMORMS3(lr=lr)
        if name.lower() == "adagrad":
            return optimizers.AdaGrad(lr=lr)
        if name.lower() == "adadelta":
            return optimizers.AdaDelta(rho=momentum)
        if name.lower() == "nesterov" or name.lower() == "nesterovag":
            return optimizers.NesterovAG(lr=lr, momentum=momentum)
        if name.lower() == "rmsprop":
            return optimizers.RMSprop(lr=lr, alpha=momentum)
        if name.lower() == "momentumsgd":
            return optimizers.MomentumSGD(lr=lr, momentum=momentum)
        if name.lower() == "sgd":
            return optimizers.SGD(lr=lr)
Project: adversarial-autoencoder    Author: musyoku
def decrease_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.MomentumSGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return final_value
        opt.alpha *= factor
        return
    raise NotImplementedError()
Project: soft-dtw    Author: mblondel
def train(network, loss, X_tr, Y_tr, X_te, Y_te, n_epochs=30, gamma=1):
    model = Objective(network, loss=loss, gamma=gamma)

    #optimizer = optimizers.SGD()
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    train = tuple_dataset.TupleDataset(X_tr, Y_tr)
    test = tuple_dataset.TupleDataset(X_te, Y_te)

    train_iter = iterators.SerialIterator(train, batch_size=1, shuffle=True)
    test_iter = iterators.SerialIterator(test, batch_size=1, repeat=False,
                                         shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (n_epochs, 'epoch'))

    trainer.run()
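Note that the Trainer above runs silently and never consumes test_iter; a sketch of the stock reporting extensions one might add before trainer.run() (assuming from chainer.training import extensions):

trainer.extend(extensions.Evaluator(test_iter, model))  # evaluate on the held-out set
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss']))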
Project: chainer-glu    Author: musyoku
def decay_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return
        opt.alpha *= factor
        return
    raise NotImplementedError()
Project: unrolled-gan    Author: musyoku
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return optimizers.SGD(lr=lr)
Project: adgm    Author: musyoku
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return optimizers.SGD(lr=lr)
Project: chainer-speech-recognition    Author: musyoku
def get_learning_rate(opt):
    if isinstance(opt, optimizers.NesterovAG):
        return opt.lr
    if isinstance(opt, optimizers.MomentumSGD):
        return opt.lr
    if isinstance(opt, optimizers.SGD):
        return opt.lr
    if isinstance(opt, optimizers.Adam):
        return opt.alpha
    raise NotImplementedError()
Project: chainer-speech-recognition    Author: musyoku
def set_learning_rate(opt, lr):
    if isinstance(opt, optimizers.NesterovAG):
        opt.lr = lr
        return
    if isinstance(opt, optimizers.MomentumSGD):
        opt.lr = lr
        return
    if isinstance(opt, optimizers.SGD):
        opt.lr = lr
        return
    if isinstance(opt, optimizers.Adam):
        opt.alpha = lr
        return
    raise NotImplementedError()
Project: chainer-speech-recognition    Author: musyoku
def set_momentum(opt, momentum):
    if isinstance(opt, optimizers.NesterovAG):
        opt.momentum = momentum
        return
    if isinstance(opt, optimizers.MomentumSGD):
        opt.momentum = momentum
        return
    if isinstance(opt, optimizers.SGD):
        return
    if isinstance(opt, optimizers.Adam):
        opt.beta1 = momentum
        return
    raise NotImplementedError()
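Because these setters hide the per-optimizer attribute names (lr vs. alpha), schedule code stays optimizer-agnostic. A sketch of a linear warmup built on top of them (the warmup length and peak rate are invented):

peak_lr = 0.1
warmup_steps = 1000
for step in range(warmup_steps):
    set_learning_rate(opt, peak_lr * (step + 1) / warmup_steps)
    # ... run one training update here ...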
Project: chainer-speech-recognition    Author: musyoku
def get_optimizer(name, lr, momentum):
    if name == "sgd":
        return optimizers.SGD(lr=lr)
    if name == "msgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name == "nesterov":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    raise NotImplementedError()
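Construction is then a single call followed by setup (a sketch; model is assumed to be a chainer.Chain):

opt = get_optimizer("msgd", 0.01, 0.9)  # an optimizers.MomentumSGD(lr=0.01, momentum=0.9)
opt.setup(model)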
Project: chainer-qrnn    Author: musyoku
def get_current_learning_rate(opt):
    if isinstance(opt, optimizers.NesterovAG):
        return opt.lr
    if isinstance(opt, optimizers.MomentumSGD):
        return opt.lr
    if isinstance(opt, optimizers.SGD):
        return opt.lr
    if isinstance(opt, optimizers.Adam):
        return opt.alpha
    raise NotImplementedError()
Project: chainer-qrnn    Author: musyoku
def get_optimizer(name, lr, momentum):
    if name == "sgd":
        return optimizers.SGD(lr=lr)
    if name == "msgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name == "nesterov":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    raise NotImplementedError()
Project: chainer-deconv    Author: germanRos
def create(self):
        return optimizers.SGD(0.1)
Project: chainer-deconv    Author: germanRos
def check_weight_decay(self):
        w = self.target.param.data
        g = self.target.param.grad

        decay = 0.2
        expect = w - g - decay * w

        opt = optimizers.SGD(lr=1)
        opt.setup(self.target)
        opt.add_hook(optimizer.WeightDecay(decay))
        opt.update()

        gradient_check.assert_allclose(expect, w)
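With lr=1 the hook makes the SGD step compute w - (g + decay * w), which is exactly the expect computed above. Outside of tests the hook is attached the same way; a sketch with an illustrative decay coefficient:

opt = optimizers.SGD(lr=0.01)
opt.setup(model)                           # 'model' is any chainer.Link
opt.add_hook(optimizer.WeightDecay(1e-4))  # folds an L2 penalty into each gradient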
Project: chainer-deconv    Author: germanRos
def check_lasso(self):
        w = self.target.param.data
        g = self.target.param.grad
        xp = cuda.get_array_module(w)
        decay = 0.2
        expect = w - g - decay * xp.sign(w)

        opt = optimizers.SGD(lr=1)
        opt.setup(self.target)
        opt.add_hook(optimizer.Lasso(decay))
        opt.update()

        gradient_check.assert_allclose(expect, w)
Project: adversarial-autoencoder    Author: musyoku
def get_current_learning_rate(opt):
    if isinstance(opt, optimizers.NesterovAG):
        return opt.lr
    if isinstance(opt, optimizers.MomentumSGD):
        return opt.lr
    if isinstance(opt, optimizers.SGD):
        return opt.lr
    if isinstance(opt, optimizers.Adam):
        return opt.alpha
    raise NotImplementedError()
Project: adversarial-autoencoder    Author: musyoku
def set_learning_rate(opt, lr):
    if isinstance(opt, optimizers.NesterovAG):
        opt.lr = lr
        return
    if isinstance(opt, optimizers.MomentumSGD):
        opt.lr = lr
        return
    if isinstance(opt, optimizers.SGD):
        opt.lr = lr
        return
    if isinstance(opt, optimizers.Adam):
        opt.alpha = lr
        return
    raise NotImplementedError()
Project: adversarial-autoencoder    Author: musyoku
def get_optimizer(name, lr, momentum):
    name = name.lower()
    if name == "sgd":
        return optimizers.SGD(lr=lr)
    if name == "msgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name == "nesterov":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    raise NotImplementedError()
Project: chainercv    Author: chainer
def check_gradient_scaling(self):
        w = self.target.param.array
        g = self.target.param.grad

        rate = 0.2
        expect = w - g * rate

        opt = optimizers.SGD(lr=1)
        opt.setup(self.target)
        opt.add_hook(GradientScaling(rate))
        opt.update()

        testing.assert_allclose(expect, w)
Project: deel    Author: uei
def __init__(self, optimizer=None, vocab=None, n_input_units=1000,
             n_units=650, grad_clip=5, bproplen=35):

        if vocab is None:
            vocab = BatchTrainer.vocab
        self.vocab = vocab
        n_vocab = len(vocab)
        super(LSTM, self).__init__('LSTM')

        self.func = model.lstm.RNNLM(n_input_units=n_input_units, n_vocab=n_vocab, n_units=n_units)
        self.func.compute_accuracy = False
        for param in self.func.params():
            data = param.data
            data[:] = np.random.uniform(-0.1, 0.1, data.shape)

        if Deel.gpu >= 0:
            self.func.to_gpu()

        # Fall back to plain SGD when no optimizer is supplied.
        if optimizer is None:
            optimizer = optimizers.SGD(lr=1.)
        self.optimizer = optimizer
        self.optimizer.setup(self.func)
        self.clip = chainer.optimizer.GradientClipping(grad_clip)
        self.optimizer.add_hook(self.clip)

        self.accum_loss = 0
        self.cur_log_perp = Deel.xp.zeros(())
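The GradientClipping hook attached above rescales all gradients whenever their global L2 norm exceeds the threshold; a minimal isolated sketch (the threshold is invented):

opt = optimizers.SGD(lr=1.)
opt.setup(model)                                       # any chainer.Link
opt.add_hook(chainer.optimizer.GradientClipping(5.0))  # cap the global gradient norm at 5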
Project: mlpnlp-nmt    Author: mlpnlp
def setOptimizer(args, EncDecAtt):
    # configure the optimizer
    if args.optimizer == 'SGD':
        optimizer = chaOpt.SGD(lr=args.lrate)
        sys.stdout.write(
            '# SET Learning %s: initial learning rate: %e\n' %
            (args.optimizer, optimizer.lr))
    elif args.optimizer == 'Adam':
        # assert 0, "Currently Adam is not supported for asynchronous update"
        optimizer = chaOpt.Adam(alpha=args.lrate)
        sys.stdout.write(
            '# SET Learning %s: initial learning rate: %e\n' %
            (args.optimizer, optimizer.alpha))
    elif args.optimizer == 'MomentumSGD':
        optimizer = chaOpt.MomentumSGD(lr=args.lrate)
        sys.stdout.write(
            '# SET Learning %s: initial learning rate: %e\n' %
            (args.optimizer, optimizer.lr))
    elif args.optimizer == 'AdaDelta':
        optimizer = chaOpt.AdaDelta(rho=args.lrate)
        sys.stdout.write(
            '# SET Learning %s: initial learning rate: %e\n' %
            (args.optimizer, optimizer.rho))
    else:
        assert 0, "ERROR"

    optimizer.setup(EncDecAtt.model)  # attach the model's parameters to the optimizer
    if args.optimizer == 'Adam':
        optimizer.t = 1  # hack to suppress a Chainer warning; no substantive effect on training

    return optimizer
Project: RL_reversi    Author: ryogrid
def __init__(self, turn, name="DQN", e=1, dispPred=False):
        self.name = name
        self.myturn = turn
        self.model = MLP(64, 256, 64)
        self.optimizer = optimizers.SGD()
        self.optimizer.setup(self.model)
        self.e = e
        self.gamma = 0.95
        self.dispPred = dispPred
        self.last_move = None
        self.last_board = None
        self.last_pred = None
        self.totalgamecount = 0
        self.rwin, self.rlose, self.rdraw, self.rmiss = 1, -1, 0, -1.5
Project: RL_reversi    Author: ryogrid
def __init__(self, turn, name="DQN", e=1, dispPred=False):
        self.name = name
        self.myturn = turn
        self.model = MLP(9, 162, 9)
        self.optimizer = optimizers.SGD()
        self.optimizer.setup(self.model)
        self.e = e
        self.gamma = 0.95
        self.dispPred = dispPred
        self.last_move = None
        self.last_board = None
        self.last_pred = None
        self.totalgamecount = 0
        self.rwin, self.rlose, self.rdraw, self.rmiss = 1, -1, 0, -1.5
Project: chainer-glu    Author: musyoku
def get_current_learning_rate(opt):
    if isinstance(opt, optimizers.NesterovAG):
        return opt.lr
    if isinstance(opt, optimizers.Adam):
        return opt.alpha
    if isinstance(opt, optimizers.SGD):
        return opt.lr
    raise NotImplementedError()
Project: chainer-glu    Author: musyoku
def get_optimizer(name, lr, momentum):
    if name == "nesterov":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name == "sgd":
        return optimizers.SGD(lr=lr)
    raise NotImplementedError()
Project: chainer-EWC    Author: okdshin
def train_task(args, train_name, model, epoch_num,
               train_dataset, test_dataset_dict, batch_size):
    optimizer = optimizers.SGD()
    optimizer.setup(model)

    train_iter = iterators.SerialIterator(train_dataset, batch_size)
    test_iter_dict = {name: iterators.SerialIterator(
            test_dataset, batch_size, repeat=False, shuffle=False)
            for name, test_dataset in test_dataset_dict.items()}

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (epoch_num, 'epoch'), out=args.out)
    for name, test_iter in test_iter_dict.items():
        trainer.extend(extensions.Evaluator(test_iter, model), name)
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss'] +
        [test+'/main/loss' for test in test_dataset_dict.keys()] +
        ['main/accuracy'] +
        [test+'/main/accuracy' for test in test_dataset_dict.keys()]))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.PlotReport(
        [test+"/main/accuracy" for test
         in test_dataset_dict.keys()],
        file_name=train_name+".png"))
    trainer.run()