Python chainer.optimizers module: MomentumSGD() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use chainer.optimizers.MomentumSGD().
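
The snippets below are shown as extracted and all assume the imports "import chainer" and "from chainer import optimizers". As a point of reference, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic MomentumSGD workflow they build on:

import numpy as np
import chainer
from chainer import optimizers

# A stand-in model; each snippet below assumes some chainer.Link/Chain.
model = chainer.links.Linear(10, 2)

optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)                                   # bind the optimizer to the model's parameters
optimizer.add_hook(chainer.optimizer.WeightDecay(1e-4))  # optional L2 regularization hook

x = np.random.rand(8, 10).astype(np.float32)
t = np.random.randint(0, 2, size=8).astype(np.int32)

model.cleargrads()                                       # zero accumulated gradients
loss = chainer.functions.softmax_cross_entropy(model(x), t)
loss.backward()                                          # populate parameter gradients
optimizer.update()                                       # apply one momentum-SGD step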

Project: chainer-faster-rcnn    Author: mitmul    | Project source | File source
def get_optimizer(model, opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # The first model as the master model
    optimizer.setup(model)
    if opt == 'MomentumSGD':
        optimizer.add_hook(
            chainer.optimizer.WeightDecay(weight_decay))

    return optimizer
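
For orientation, a hypothetical call to the factory above (the model object and hyperparameter values are illustrative, not from the repository):

optimizer = get_optimizer(model, 'MomentumSGD', lr=0.001, weight_decay=0.0005)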
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.1, momentum=0.9, schedule=(250, 375), weight_decay=1.0e-4):
        super(OptimizerStochasticDepth, self).__init__(model)
        self.lr = lr
        self.momentum = momentum
        self.schedule = schedule
        self.weight_decay = weight_decay
        all_links = OptimizerStochasticDepth._find(model)
        optimizer_set = []
        for link in all_links:
            optimizer = optimizers.MomentumSGD(lr, momentum)
            weight_decay = chainer.optimizer.WeightDecay(self.weight_decay)
            optimizer.setup(link[0])
            optimizer.add_hook(weight_decay)
            optimizer_set.append(optimizer)
        self.optimizer_set = optimizer_set
        self.all_links = all_links
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.1, momentum=0.9, schedule=(250, 375), weight_decay=1.0e-4):
        super(OptimizerResnetOfResnet, self).__init__(model)
        self.lr = lr
        self.momentum = momentum
        self.schedule = schedule
        self.weight_decay = weight_decay
        all_links = OptimizerStochasticDepth._find(model)
        optimizer_set = []
        for link in all_links:
            optimizer = optimizers.MomentumSGD(lr, momentum)
            weight_decay = chainer.optimizer.WeightDecay(self.weight_decay)
            optimizer.setup(link[0])
            optimizer.add_hook(weight_decay)
            optimizer_set.append(optimizer)
        self.optimizer_set = optimizer_set
        self.all_links = all_links
Project: chainer-segnet    Author: pfnet-research    | Project source | File source
def get_optimizer(opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1,
            beta2=adam_beta2, eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # For MomentumSGD, record the weight decay rate on the optimizer object
    if opt == 'MomentumSGD':
        optimizer.decay = weight_decay

    return optimizer
Project: chainer-segnet    Author: pfnet-research    | Project source | File source
def test_remove_link(self):
        opt = optimizers.MomentumSGD(lr=0.01)
        # Update each depth
        for depth in six.moves.range(1, self.n_encdec + 1):
            model = segnet.SegNet(self.n_encdec, self.n_classes,
                                  self.x_shape[1], self.n_mid)
            model = segnet.SegNetLoss(
                model, class_weight=None, train_depth=depth)
            opt.setup(model)

            # Deregister non-target links from opt
            if depth > 1:
                model.predictor.remove_link('conv_cls')
            for d in range(1, self.n_encdec + 1):
                if d != depth:
                    model.predictor.remove_link('encdec{}'.format(d))

            for name, link in model.namedparams():
                if depth > 1:
                    self.assertTrue(
                        'encdec{}'.format(depth) in name)
                else:
                    self.assertTrue(
                        'encdec{}'.format(depth) in name or 'conv_cls' in name)
Project: chainer-speech-recognition    Author: musyoku    | Project source | File source
def decay_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.MomentumSGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return final_value
        opt.alpha *= factor
        return
    raise NotImplementedError()
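
A minimal sketch (not from the repository) of driving the helper above from a training loop, with a stand-in model:

import chainer
from chainer import optimizers

model = chainer.links.Linear(10, 2)   # stand-in model
opt = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
opt.setup(model)

for epoch in range(50):
    # ... one epoch of training would go here ...
    decay_learning_rate(opt, factor=0.97, final_value=1e-5)  # geometric decay toward the floor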
Project: chainer-qrnn    Author: musyoku    | Project source | File source
def decay_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.MomentumSGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return final_value
        opt.alpha *= factor
        return
    raise NotImplementedError()
Project: fractal_net    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.02, momentum=0.9, schedule=(200, 300, 350, 375), weight_decay=1.0e-4):
        super(OptimizerFractalNet, self).__init__(model)
        self.lr = lr
        self.momentum = momentum
        self.schedule = schedule
        all_links = OptimizerFractalNet._find(model)
        optimizer_set = []
        for link in all_links:
            wd = chainer.optimizer.WeightDecay(weight_decay)
            optimizer = optimizers.MomentumSGD(lr, momentum)
            optimizer.setup(link[0])
            optimizer.add_hook(wd)
            optimizer_set.append(optimizer)
        self.optimizer_set = optimizer_set
        self.all_links = all_links
        self.flag = False
Project: fractal_net    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.1, momentum=0.9, schedule=(250, 375), weight_decay=1.0e-4):
        super(OptimizerStochasticDepth, self).__init__(model)
        self.lr = lr
        self.momentum = momentum
        self.schedule = schedule
        self.weight_decay = weight_decay
        all_links = OptimizerStochasticDepth._find(model)
        optimizer_set = []
        for link in all_links:
            optimizer = optimizers.MomentumSGD(lr, momentum)
            weight_decay = chainer.optimizer.WeightDecay(self.weight_decay)
            optimizer.setup(link[0])
            optimizer.add_hook(weight_decay)
            optimizer_set.append(optimizer)
        self.optimizer_set = optimizer_set
        self.all_links = all_links
        self.flag = False
Project: fractal_net    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.1, momentum=0.9, schedule=(250, 375), weight_decay=1.0e-4):
        super(OptimizerResnetOfResnet, self).__init__(model)
        self.lr = lr
        self.momentum = momentum
        self.schedule = schedule
        self.weight_decay = weight_decay
        all_links = OptimizerStochasticDepth._find(model)
        optimizer_set = []
        for link in all_links:
            optimizer = optimizers.MomentumSGD(lr, momentum)
            weight_decay = chainer.optimizer.WeightDecay(self.weight_decay)
            optimizer.setup(link[0])
            optimizer.add_hook(weight_decay)
            optimizer_set.append(optimizer)
        self.optimizer_set = optimizer_set
        self.all_links = all_links
        self.flag = False
Project: fractal_net    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.5, momentum=0.9, schedule=(150, 225), weight_decay=1.0e-4):
        super(OptimizerPyramidalResNetWithSSD, self).__init__(model)
        self.lr = lr
        self.momentum = momentum
        self.schedule = schedule
        self.weight_decay = weight_decay
        all_links = OptimizerPyramidalResNetWithSSD._find(model)
        optimizer_set = []
        for link in all_links:
            optimizer = optimizers.MomentumSGD(lr, momentum)
            weight_decay = chainer.optimizer.WeightDecay(self.weight_decay)
            optimizer.setup(link[0])
            optimizer.add_hook(weight_decay)
            optimizer_set.append(optimizer)
        self.optimizer_set = optimizer_set
        self.all_links = all_links
        self.flag = False
Project: ddnn    Author: kunglab    | Project source | File source
def get_optimizer(self, name, lr, momentum=0.9):
        if name.lower() == "adam":
            return optimizers.Adam(alpha=lr, beta1=momentum)
        if name.lower() == "smorms3":
            return optimizers.SMORMS3(lr=lr)
        if name.lower() == "adagrad":
            return optimizers.AdaGrad(lr=lr)
        if name.lower() == "adadelta":
            return optimizers.AdaDelta(rho=momentum)
        if name.lower() == "nesterov" or name.lower() == "nesterovag":
            return optimizers.NesterovAG(lr=lr, momentum=momentum)
        if name.lower() == "rmsprop":
            return optimizers.RMSprop(lr=lr, alpha=momentum)
        if name.lower() == "momentumsgd":
            return optimizers.MomentumSGD(lr=lr, momentum=momentum)
        if name.lower() == "sgd":
            return optimizers.SGD(lr=lr)
Project: adversarial-autoencoder    Author: musyoku    | Project source | File source
def decrease_learning_rate(opt, factor, final_value):
    if isinstance(opt, optimizers.NesterovAG):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.SGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.MomentumSGD):
        if opt.lr <= final_value:
            return final_value
        opt.lr *= factor
        return
    if isinstance(opt, optimizers.Adam):
        if opt.alpha <= final_value:
            return final_value
        opt.alpha *= factor
        return
    raise NotImplementedError()
Project: unrolled-gan    Author: musyoku    | Project source | File source
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return optimizers.SGD(lr=lr)
Project: unrolled-gan    Author: musyoku    | Project source | File source
def update_momentum(self, momentum):
        if isinstance(self.optimizer, optimizers.Adam):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, Eve):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, optimizers.AdaDelta):
            self.optimizer.rho = momentum
            return
        if isinstance(self.optimizer, optimizers.NesterovAG):
            self.optimizer.momentum = momentum
            return
        if isinstance(self.optimizer, optimizers.RMSprop):
            self.optimizer.alpha = momentum
            return
        if isinstance(self.optimizer, optimizers.MomentumSGD):
            self.optimizer.momentum = momentum
            return
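
The dispatch above writes a single "momentum" knob onto whichever attribute plays that role for the active optimizer. For MomentumSGD, the branch is equivalent to this sketch (illustrative values):

opt = optimizers.MomentumSGD(lr=0.0002, momentum=0.5)
opt.momentum = 0.9   # what update_momentum(0.9) does when the optimizer is a MomentumSGD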
Project: wavenet    Author: musyoku    | Project source | File source
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return chainer.optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return chainer.optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return chainer.optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return chainer.optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return chainer.optimizers.SGD(lr=lr)
    raise Exception()
Project: wavenet    Author: musyoku    | Project source | File source
def update_momentum(self, momentum):
        if isinstance(self.optimizer, optimizers.Adam):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, Eve):
            self.optimizer.beta1 = momentum
            return
        if isinstance(self.optimizer, optimizers.AdaDelta):
            self.optimizer.rho = momentum
            return
        if isinstance(self.optimizer, optimizers.NesterovAG):
            self.optimizer.momentum = momentum
            return
        if isinstance(self.optimizer, optimizers.RMSprop):
            self.optimizer.alpha = momentum
            return
        if isinstance(self.optimizer, optimizers.MomentumSGD):
            self.optimizer.momentum = momentum
            return
Project: LSGAN    Author: musyoku    | Project source | File source
def update_momentum(self, momentum):
        if isinstance(self._optimizer, optimizers.Adam):
            self._optimizer.beta1 = momentum
            return
        if isinstance(self._optimizer, Eve):
            self._optimizer.beta1 = momentum
            return
        if isinstance(self._optimizer, optimizers.AdaDelta):
            self._optimizer.rho = momentum
            return
        if isinstance(self._optimizer, optimizers.NesterovAG):
            self._optimizer.momentum = momentum
            return
        if isinstance(self._optimizer, optimizers.RMSprop):
            self._optimizer.alpha = momentum
            return
        if isinstance(self._optimizer, optimizers.MomentumSGD):
            self._optimizer.momentum = momentum
            return
Project: adgm    Author: musyoku    | Project source | File source
def get_optimizer(name, lr, momentum=0.9):
    if name.lower() == "adam":
        return optimizers.Adam(alpha=lr, beta1=momentum)
    if name.lower() == "eve":
        return Eve(alpha=lr, beta1=momentum)
    if name.lower() == "adagrad":
        return optimizers.AdaGrad(lr=lr)
    if name.lower() == "adadelta":
        return optimizers.AdaDelta(rho=momentum)
    if name.lower() == "nesterov" or name.lower() == "nesterovag":
        return optimizers.NesterovAG(lr=lr, momentum=momentum)
    if name.lower() == "rmsprop":
        return optimizers.RMSprop(lr=lr, alpha=momentum)
    if name.lower() == "momentumsgd":
        return optimizers.MomentumSGD(lr=lr, momentum=momentum)
    if name.lower() == "sgd":
        return optimizers.SGD(lr=lr)
Project: googlenet_v3    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(int(32000. / (50000. / 128)), int(48000. / (50000. / 128))), lr=0.1, momentum=0.9, weight_decay=1.0e-4, warm_up_lr=0.01):
        super(OptimizerResnet, self).__init__(model)
        optimizer = optimizers.MomentumSGD(warm_up_lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.warmup_lr = warm_up_lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: googlenet_v3    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(150, 225), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        super(OptimizerDense, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: googlenet_v3    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(60, 120, 160), lr=0.1, momentum=0.9, weight_decay=5.0e-4):
        super(OptimizerWideRes, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: googlenet_v3    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(196, 224), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        super(OptimizerSwapout, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: googlenet_v3    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=1.0e-5, period=2):
        super(OptimizerXception, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.period = int(period)
Project: googlenet_v3    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.0015, momentum=0.9, weight_decay=2.0e-4):
        super(OptimizerGooglenet, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: googlenet_v3    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=1.0e-4, schedule=(int(1.0e5 / (50000. / 128)), )):
        super(OptimizerNetworkInNetwork, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.schedule = schedule
Project: googlenet_v3    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=4.0e-5):
        super(OptimizerGooglenetV2, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: googlenet_v3    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.045, weight_decay=4.0e-5, clip=2.0):
        super(OptimizerGooglenetV3, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, 0.9)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        clip_hook = chainer.optimizer.GradientClipping(clip)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        optimizer.add_hook(clip_hook)
        self.optimizer = optimizer
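
Unlike its sibling classes, this one also registers chainer.optimizer.GradientClipping, which rescales all gradients whenever their combined L2 norm exceeds the threshold. A minimal sketch with illustrative values:

import chainer
from chainer import optimizers

model = chainer.links.Linear(10, 2)   # stand-in model
opt = optimizers.MomentumSGD(lr=0.045, momentum=0.9)
opt.setup(model)
opt.add_hook(chainer.optimizer.WeightDecay(4.0e-5))
opt.add_hook(chainer.optimizer.GradientClipping(2.0))  # clip the global gradient L2 norm to 2.0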
Project: resnext    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(int(32000. / (50000. / 128)), int(48000. / (50000. / 128))), lr=0.1, momentum=0.9, weight_decay=1.0e-4, warm_up_lr=0.01):
        super(OptimizerResnet, self).__init__(model)
        optimizer = optimizers.MomentumSGD(warm_up_lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.warmup_lr = warm_up_lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: resnext    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(150, 225), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        super(OptimizerDense, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: resnext    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(60, 120, 160), lr=0.1, momentum=0.9, weight_decay=5.0e-4):
        super(OptimizerWideRes, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: resnext    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(196, 224), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        super(OptimizerSwapout, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: resnext    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=1.0e-5, period=2):
        super(OptimizerXception, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.period = int(period)
Project: resnext    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.0015, momentum=0.9, weight_decay=2.0e-4):
        super(OptimizerGooglenet, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: resnext    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=1.0e-4, schedule=(int(1.0e5 / (50000. / 128)), )):
        super(OptimizerNetworkInNetwork, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.schedule = schedule
Project: resnext    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=4.0e-5):
        super(OptimizerGooglenetV2, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: resnext    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=5.0e-4, schedule=(150, 225)):
        super(OptimizerResNext, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(int(32000. / (50000. / 128)), int(48000. / (50000. / 128))), lr=0.1, momentum=0.9, weight_decay=1.0e-4, warm_up_lr=0.01):
        super(OptimizerResnet, self).__init__(model)
        optimizer = optimizers.MomentumSGD(warm_up_lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.warmup_lr = warm_up_lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(150, 225), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        super(OptimizerDense, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(60, 120, 160), lr=0.1, momentum=0.9, weight_decay=5.0e-4):
        super(OptimizerWideRes, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(196, 224), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        super(OptimizerSwapout, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=1.0e-5, period=2):
        super(OptimizerXception, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.period = int(period)
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.0015, momentum=0.9, weight_decay=2.0e-4):
        super(OptimizerGooglenet, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=1.0e-4, schedule=(int(1.0e5 / (50000. / 128)), )):
        super(OptimizerNetworkInNetwork, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.schedule = schedule
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=4.0e-5):
        super(OptimizerGooglenetV2, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=5.0e-4, schedule=(150, 225)):
        super(OptimizerResNext, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.02, momentum=0.9, schedule=(150, 225, 300, 375)):
        super(OptimizerFractalNet, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        optimizer.setup(self.model)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, lr=0.01, momentum=0.9, schedule=(150, 225), weight_decay=5.0e-4):
        super(OptimizerPReLUNet, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(42, 62), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        super(OptimizerResnetInResnet, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
Project: neural_architecture_search_with_reinforcement_learning_appendix_a    Author: nutszebra    | Project source | File source
def __init__(self, model=None, schedule=(150, 175), lr=0.1, momentum=0.9, weight_decay=1.0e-4):
        super(OptimizerAppendixA, self).__init__(model)
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay_hook = chainer.optimizer.WeightDecay(weight_decay)
        optimizer.setup(self.model)
        optimizer.add_hook(weight_decay_hook)
        self.optimizer = optimizer
        self.schedule = schedule
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay