Python chainer.optimizers module: RMSprop() code examples

The following 18 code examples, extracted from open-source Python projects, illustrate how to use chainer.optimizers.RMSprop().
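Before the project snippets, here is a minimal, self-contained sketch of the typical RMSprop workflow in Chainer (construct, set up, add hooks, update). The toy model and random data are placeholders invented for illustration:

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import optimizers

# A toy regression model; any chainer.Link works here.
model = L.Linear(3, 1)

# RMSprop keeps a per-parameter running average of squared gradients;
# `lr` is the step size and `alpha` the decay rate of that average.
optimizer = optimizers.RMSprop(lr=0.01, alpha=0.99, eps=1e-8)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(1e-4))

x = np.random.randn(8, 3).astype(np.float32)
t = np.random.randn(8, 1).astype(np.float32)

model.cleargrads()
loss = F.mean_squared_error(model(x), t)
loss.backward()
optimizer.update()  # applies one RMSprop step to every parameter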

Project: chainer-faster-rcnn    Author: mitmul
def get_optimizer(model, opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # The first model as the master model
    optimizer.setup(model)
    if opt == 'MomentumSGD':
        optimizer.add_hook(
            chainer.optimizer.WeightDecay(weight_decay))

    return optimizer
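A hedged usage sketch for the helper above, with a stand-in linear model in place of the project's Faster R-CNN network (argument values are illustrative, not from the project):

import chainer.links as L

model = L.Linear(10, 2)  # stand-in for the project's model
optimizer = get_optimizer(model, 'RMSprop', lr=1e-3)  # helper defined above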
Project: chainer-segnet    Author: pfnet-research
def get_optimizer(opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # Set the weight decay rate directly (MomentumSGD only)
    if opt == 'MomentumSGD':
        optimizer.decay = weight_decay

    return optimizer
Project: ddnn    Author: kunglab
def get_optimizer(self, name, lr, momentum=0.9):
        if name.lower() == "adam":
            return optimizers.Adam(alpha=lr, beta1=momentum)
        if name.lower() == "smorms3":
            return optimizers.SMORMS3(lr=lr)
        if name.lower() == "adagrad":
            return optimizers.AdaGrad(lr=lr)
        if name.lower() == "adadelta":
            return optimizers.AdaDelta(rho=momentum)
        if name.lower() == "nesterov" or name.lower() == "nesterovag":
            return optimizers.NesterovAG(lr=lr, momentum=momentum)
        if name.lower() == "rmsprop":
            return optimizers.RMSprop(lr=lr, alpha=momentum)
        if name.lower() == "momentumsgd":
            return optimizers.MomentumSGD(lr=lr, momentum=momentum)
        if name.lower() == "sgd":
            return optimizers.SGD(lr=lr)
Project: unrolled-gan    Author: musyoku
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return optimizers.SGD(lr=lr)
Project: unrolled-gan    Author: musyoku
def update_momentum(self, momentum):
        if isinstance(self.optimizer, optimizers.Adam):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, Eve):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, optimizers.AdaDelta):
            self.optimizer.rho = momentum
            return
        if isinstance(self.optimizer, optimizers.NesterovAG):
            self.optimizer.momentum = momentum
            return
        if isinstance(self.optimizer, optimizers.RMSprop):
            self.optimizer.alpha = momentum
            return
        if isinstance(self.optimizer, optimizers.MomentumSGD):
            self.optimizer.momentum = momentum
            return
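The method above maps one generic "momentum" knob onto the matching hyperparameter of whichever optimizer is active: beta1 for Adam and Eve, rho for AdaDelta, momentum for NesterovAG and MomentumSGD, and alpha for RMSprop. A minimal standalone sketch of the same idea, using a hypothetical holder class since the surrounding trainer class is not shown:

from chainer import optimizers

class OptimizerHolder:
    # hypothetical stand-in for the project's trainer class
    def __init__(self, optimizer):
        self.optimizer = optimizer

    def update_momentum(self, momentum):
        # For RMSprop, `alpha` (the squared-gradient decay rate)
        # plays the role of the momentum-like hyperparameter.
        if isinstance(self.optimizer, optimizers.RMSprop):
            self.optimizer.alpha = momentum

holder = OptimizerHolder(optimizers.RMSprop(lr=1e-4, alpha=0.9))
holder.update_momentum(0.5)
print(holder.optimizer.alpha)  # -> 0.5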
Project: wavenet    Author: musyoku
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return chainer.optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return chainer.optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return chainer.optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return chainer.optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return chainer.optimizers.SGD(lr=lr)
    raise Exception()
Project: wavenet    Author: musyoku
def update_momentum(self, momentum):
        if isinstance(self.optimizer, optimizers.Adam):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, Eve):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, optimizers.AdaDelta):
            self.optimizer.rho = momentum
            return
        if isinstance(self.optimizer, optimizers.NesterovAG):
            self.optimizer.momentum = momentum
            return
        if isinstance(self.optimizer, optimizers.RMSprop):
            self.optimizer.alpha = momentum
            return
        if isinstance(self.optimizer, optimizers.MomentumSGD):
            self.optimizer.momentum = momentum
            return
Project: LSGAN    Author: musyoku
def update_momentum(self, momentum):
        if isinstance(self._optimizer, optimizers.Adam):
            self._optimizer.beta1 = momentum
            return
        if isinstance(self._optimizer, Eve):
            self._optimizer.beta1 = momentum
            return
        if isinstance(self._optimizer, optimizers.AdaDelta):
            self._optimizer.rho = momentum
            return
        if isinstance(self._optimizer, optimizers.NesterovAG):
            self._optimizer.momentum = momentum
            return
        if isinstance(self._optimizer, optimizers.RMSprop):
            self._optimizer.alpha = momentum
            return
        if isinstance(self._optimizer, optimizers.MomentumSGD):
            self._optimizer.momentum = momentum
            return
Project: adgm    Author: musyoku
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return optimizers.SGD(lr=lr)
Project: chainerrl    Author: chainer
def test_share_states(self):

        model = L.Linear(2, 2)
        opt_a = optimizers.RMSprop()
        opt_a.setup(model)
        # chainerrl renamed its `async` module to `async_` because
        # `async` became a reserved word in Python 3.7
        arrays = async_.share_states_as_shared_arrays(opt_a)
        opt_b = optimizers.RMSprop()
        opt_b.setup(copy.deepcopy(model))
        # In Chainer v2, a model cannot be set up by two optimizers or more.

        opt_c = optimizers.RMSprop()
        opt_c.setup(copy.deepcopy(model))

        """
        Removed the tests by assert_different_pointers
        since they are trivial now.
        """

        async_.set_shared_states(opt_b, arrays)
        async_.set_shared_states(opt_c, arrays)

        def assert_same_pointers(a, b):
            a = a.target
            b = b.target
            for param_name, param_a in a.namedparams():
                param_b = dict(b.namedparams())[param_name]
                state_a = param_a.update_rule.state
                state_b = param_b.update_rule.state
                self.assertTrue(state_a)
                self.assertTrue(state_b)
                for state_name, state_val_a in state_a.items():
                    state_val_b = state_b[state_name]
                    self.assertTrue(isinstance(
                        state_val_a, np.ndarray))
                    self.assertTrue(isinstance(
                        state_val_b, np.ndarray))
                    self.assertEqual(state_val_a.ctypes.data,
                                     state_val_b.ctypes.data)

        assert_same_pointers(opt_a, opt_b)
        assert_same_pointers(opt_a, opt_c)
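The assertions above check that, after set_shared_states, every RMSprop state array of opt_b and opt_c (in Chainer's RMSprop, the running mean of squared gradients stored under the key 'ms' in each parameter's update rule) occupies the same memory as the shared arrays derived from opt_a. Sharing optimizer state this way is what lets A3C-style asynchronous training in ChainerRL update a single optimizer from multiple processes.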
Project: resnext    Author: nutszebra
def __init__(self, model=None, lr=0.045, decay=0.9, eps=1.0, weight_decay=4.0e-5, clip=2.0):
        super(OptimizerGooglenetV3, self).__init__(model)
        optimizer = optimizers.RMSprop(lr, decay, eps)
        weight_decay = chainer.optimizer.WeightDecay(weight_decay)
        clip = chainer.optimizer.GradientClipping(clip)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay)
        optimizer.add_hook(clip)
        self.optimizer = optimizer
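The hyperparameters here (lr=0.045, decay 0.9, eps 1.0, plus weight decay and gradient clipping hooks) match the RMSProp recipe reported in the Inception-v3 paper, which the class name alludes to. A hedged, standalone reconstruction under that reading, with a stand-in model since the project's base class is not shown; note that the positional `decay` argument above lands in RMSprop's `alpha` parameter:

import chainer
import chainer.links as L
from chainer import optimizers

model = L.Linear(4, 4)  # stand-in for the project's network

# RMSprop(lr, alpha, eps): passing `decay` positionally sets `alpha`.
optimizer = optimizers.RMSprop(lr=0.045, alpha=0.9, eps=1.0)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(4.0e-5))
optimizer.add_hook(chainer.optimizer.GradientClipping(2.0))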
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra
def __init__(self, model=None, lr=0.045, decay=0.9, eps=1.0, weight_decay=4.0e-5, clip=2.0):
        super(OptimizerGooglenetV3, self).__init__(model)
        optimizer = optimizers.RMSprop(lr, decay, eps)
        weight_decay = chainer.optimizer.WeightDecay(weight_decay)
        clip = chainer.optimizer.GradientClipping(clip)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay)
        optimizer.add_hook(clip)
        self.optimizer = optimizer
Project: chainer-deconv    Author: germanRos
def create(self):
        return optimizers.RMSprop(0.1)
Project: fractal_net    Author: nutszebra
def __init__(self, model=None, lr=0.045, decay=0.9, eps=1.0, weight_decay=4.0e-5, clip=2.0):
        super(OptimizerGooglenetV3, self).__init__(model)
        optimizer = optimizers.RMSprop(lr, decay, eps)
        weight_decay = chainer.optimizer.WeightDecay(weight_decay)
        clip = chainer.optimizer.GradientClipping(clip)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay)
        optimizer.add_hook(clip)
        self.optimizer = optimizer
Project: pyramidal_residual_networks    Author: nutszebra
def __init__(self, model=None, lr=0.045, decay=0.9, eps=1.0, weight_decay=4.0e-5, clip=2.0):
        super(OptimizerGooglenetV3, self).__init__(model)
        optimizer = optimizers.RMSprop(lr, decay, eps)
        weight_decay = chainer.optimizer.WeightDecay(weight_decay)
        clip = chainer.optimizer.GradientClipping(clip)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay)
        optimizer.add_hook(clip)
        self.optimizer = optimizer
Project: chainer-wasserstein-gan    Author: hvy
def train(args):
    nz = args.nz
    batch_size = args.batch_size
    epochs = args.epochs
    gpu = args.gpu

    # CIFAR-10 images in range [-1, 1] (tanh generator outputs)
    train, _ = datasets.get_cifar10(withlabel=False, ndim=3, scale=2)
    train -= 1.0
    train_iter = iterators.SerialIterator(train, batch_size)

    z_iter = RandomNoiseIterator(GaussianNoiseGenerator(0, 1, args.nz),
                                 batch_size)

    optimizer_generator = optimizers.RMSprop(lr=0.00005)
    optimizer_critic = optimizers.RMSprop(lr=0.00005)
    optimizer_generator.setup(Generator())
    optimizer_critic.setup(Critic())

    updater = WassersteinGANUpdater(
        iterator=train_iter,
        noise_iterator=z_iter,
        optimizer_generator=optimizer_generator,
        optimizer_critic=optimizer_critic,
        device=gpu)

    trainer = training.Trainer(updater, stop_trigger=(epochs, 'epoch'))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.LogReport(trigger=(1, 'iteration')))
    trainer.extend(GeneratorSample(), trigger=(1, 'epoch'))
    trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'critic/loss',
            'critic/loss/real', 'critic/loss/fake', 'generator/loss']))
    trainer.run()
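Plain RMSprop with lr=0.00005 for both the generator and the critic follows the original WGAN recipe, whose authors reported that momentum-based optimizers such as Adam made critic training unstable; RMSprop's momentum-free update is the safer default here.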
Project: googlenet_v2    Author: nutszebra
def __init__(self, model=None, lr=0.045, decay=0.9, eps=1.0, weight_decay=4.0e-5, clip=2.0):
        super(OptimizerGooglenetV3, self).__init__(model)
        optimizer = optimizers.RMSprop(lr, decay, eps)
        weight_decay = chainer.optimizer.WeightDecay(weight_decay)
        clip = chainer.optimizer.GradientClipping(clip)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay)
        optimizer.add_hook(clip)
        self.optimizer = optimizer
Project: shake_shake    Author: nutszebra
def __init__(self, model=None, lr=0.045, decay=0.9, eps=1.0, weight_decay=4.0e-5, clip=2.0):
        super(OptimizerGooglenetV3, self).__init__(model)
        optimizer = optimizers.RMSprop(lr, decay, eps)
        weight_decay = chainer.optimizer.WeightDecay(weight_decay)
        clip = chainer.optimizer.GradientClipping(clip)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay)
        optimizer.add_hook(clip)
        self.optimizer = optimizer