Python torch.optim module: Optimizer() example source code

The following 8 code examples, extracted from open-source Python projects, illustrate how to use torch.optim.Optimizer().
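
As a baseline for reading the snippets below, the canonical pattern is: build a concrete Optimizer subclass (SGD, Adam, ...) over a module's parameters, then drive it with zero_grad()/backward()/step() once per batch. A minimal sketch; the toy model, data, and learning rate are invented for illustration:

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 1)                              # toy module
optimizer = optim.SGD(model.parameters(), lr=0.01)   # any concrete Optimizer subclass
assert isinstance(optimizer, optim.Optimizer)

x, y = torch.randn(8, 4), torch.randn(8, 1)          # fake batch
optimizer.zero_grad()                                # clear previously accumulated grads
loss = nn.functional.mse_loss(model(x), y)
loss.backward()                                      # populate .grad on each parameter
optimizer.step()                                     # apply the update to all param_groups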

Project: pytorch-planet-amazon    Author: rwightman
# requires: from torch.optim import Optimizer
def __init__(self, optimizer, last_epoch=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        if last_epoch == -1:
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError("param 'initial_lr' is not specified "
                                   "in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
        self.step(last_epoch + 1)
        self.last_epoch = last_epoch
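
This is essentially the _LRScheduler base constructor from torch.optim.lr_scheduler: on a fresh run it stamps every param group with an 'initial_lr' copied from 'lr'; when resuming (last_epoch != -1) it requires that key to already be present. A sketch of that bookkeeping seen from the outside; the model and lr value are invented:

import torch.nn as nn
import torch.optim as optim

optimizer = optim.SGD(nn.Linear(4, 1).parameters(), lr=0.1)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10)  # built on this base class
print([g['initial_lr'] for g in optimizer.param_groups])        # [0.1], stamped by __init__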
Project: sk-torch    Author: mattHawthorn
def optimizer(self, optimizer: Union[str, type]):
        if isinstance(optimizer, str):
            optimizer = getattr(optim, optimizer)
        if not issubclass(optimizer, optim.Optimizer):
            raise TypeError("`optimizer` must be a torch.optim.Optimizer or a string which refers to one by name")
        self._optimizer = optimizer
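
The string branch works because torch.optim exposes each optimizer class as a module attribute, so a name like 'Adam' resolves with getattr. A quick sketch:

import torch.optim as optim

cls = getattr(optim, 'Adam')            # same lookup the setter performs
assert issubclass(cls, optim.Optimizer)
# An unknown name raises AttributeError here, before the issubclass TypeError check.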
Project: sk-torch    Author: mattHawthorn
def _single_batch_train_pass(self, X_batch: TensorType, y_batch: TensorType, optimizer: optim.Optimizer):
        module = self.torch_module
        module.zero_grad()     # redundant with the next line when the optimizer
        optimizer.zero_grad()  # was built over all of this module's parameters
        err = self._single_batch_test_pass(X_batch, y_batch)
        err.backward()
        optimizer.step()
        return err
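
The zeroing at the top of this pass matters because autograd accumulates gradients across backward() calls rather than overwriting them. A tiny demonstration:

import torch

w = torch.ones(1, requires_grad=True)
(w * 2).backward()
(w * 2).backward()
print(w.grad)   # tensor([4.]): two backward passes summed, hence zero_grad() per batch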
Project: sk-torch    Author: mattHawthorn
def __init__(self, torch_module: nn.Module, loss_func: Union[loss._Loss, type, str],
                 optimizer: Union[str, optim.Optimizer],
                 classes: List[T2],
                 loss_func_kwargs: Opt[dict]=None,
                 optimizer_kwargs: Opt[dict]=None,
                 input_encoder: Opt[Callable[[T1], TensorType]]=None,
                 estimate_normalization_samples: Opt[int]=None,
                 default_batch_size: int=DEFAULT_BATCH_SIZE,
                 stopping_criterion: Callable[[List[float], Opt[List[float]]], Union[bool, Tuple[bool, Opt[str]]]]=
                    DEFAULT_STOPPING_CRITERION,
                 print_func: Callable[[Any], None]=print,
                 num_dataloader_workers: int=-2):
        class_to_int = dict(zip(classes, range(len(classes))))
        int_to_class = dict(map(reversed, class_to_int.items()))
        target_encoder = class_to_int.__getitem__
        self.class_to_int = class_to_int
        self.int_to_class = int_to_class
        self.num_classes = len(class_to_int)
        super(TorchClassifierModel, self).__init__(torch_module=torch_module, loss_func=loss_func, optimizer=optimizer,
                                                   loss_func_kwargs=loss_func_kwargs, optimizer_kwargs=optimizer_kwargs,
                                                   input_encoder=input_encoder, target_encoder=target_encoder,
                                                   output_decoder=self._get_classes,
                                                   is_classifier=True,
                                                   estimate_normalization_samples=estimate_normalization_samples,
                                                   default_batch_size=default_batch_size,
                                                   stopping_criterion=stopping_criterion,
                                                   print_func=print_func, num_dataloader_workers=num_dataloader_workers)
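
The label bookkeeping at the top of this constructor is plain Python and easy to check in isolation; the class labels here are invented:

classes = ['cat', 'dog', 'fish']
class_to_int = dict(zip(classes, range(len(classes))))    # {'cat': 0, 'dog': 1, 'fish': 2}
int_to_class = dict(map(reversed, class_to_int.items()))  # {0: 'cat', 1: 'dog', 2: 'fish'}
target_encoder = class_to_int.__getitem__
print(target_encoder('dog'))                              # 1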
Project: sk-torch    Author: mattHawthorn
def __init__(self, torch_module: nn.Module, loss_func: loss._Loss,
                 optimizer: optim.Optimizer,
                 loss_func_kwargs: Opt[dict]=None,
                 optimizer_kwargs: Opt[dict]=None,
                 input_encoder: Opt[Callable[[T1], TensorType]]=None,
                 target_encoder: Opt[Callable[[T2], TensorType]]=None,
                 output_decoder: Opt[Callable[[TensorType], T2]]=None,
                 clip_grad_norm: Opt[float]=None,
                 is_classifier: bool=False,
                 flatten_targets: bool=True,
                 flatten_output: bool=True,
                 bptt_len: int=20,
                 estimate_normalization_samples: Opt[int]=None,
                 default_batch_size: int=DEFAULT_BATCH_SIZE,
                 stopping_criterion: Callable[[List[float], Opt[List[float]]], Union[bool, Tuple[bool, Opt[str]]]] =
                    DEFAULT_STOPPING_CRITERION,
                 print_func: Callable[[Any], None]=print, num_dataloader_workers: int=-2):
        super(TorchSequenceModel, self).__init__(torch_module=torch_module, loss_func=loss_func, optimizer=optimizer,
                                                 loss_func_kwargs=loss_func_kwargs, optimizer_kwargs=optimizer_kwargs,
                                                 input_encoder=input_encoder,
                                                 target_encoder=target_encoder, output_decoder=output_decoder,
                                                 is_classifier=is_classifier,
                                                 estimate_normalization_samples=estimate_normalization_samples,
                                                 default_batch_size=default_batch_size,
                                                 stopping_criterion=stopping_criterion,
                                                 print_func=print_func, num_dataloader_workers=num_dataloader_workers)

        self.flatten_targets = flatten_targets
        self.flatten_output = flatten_output
        self.clip_grad_norm = clip_grad_norm
        self.bptt_len = bptt_len
Project: sk-torch    Author: mattHawthorn
# requires: from torch.nn.utils import clip_grad_norm  (the in-place clip_grad_norm_ in newer torch)
def _single_batch_train_pass(self, X_batch: TensorType, y_batch: TensorType, optimizer: optim.Optimizer):
        module = self.torch_module
        optimizer.zero_grad()
        err = self._single_batch_test_pass(X_batch, y_batch)
        err.backward()
        if self.clip_grad_norm:
            clip_grad_norm(module.parameters(), self.clip_grad_norm)
        optimizer.step()
        return err
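
Here clip_grad_norm is the older torch.nn.utils helper; modern torch spells it clip_grad_norm_ (in-place). The same clipped pass as a standalone sketch, with an invented module, loss, and max_norm:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_

model = nn.Linear(4, 1)
optimizer = optim.SGD(model.parameters(), lr=0.01)

optimizer.zero_grad()
loss = model(torch.randn(8, 4)).pow(2).mean()       # stand-in loss
loss.backward()
clip_grad_norm_(model.parameters(), max_norm=1.0)   # rescale grads so total norm <= 1.0
optimizer.step()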
Project: kaggle-dstl    Author: lopuhin
def __init__(self, hps: HyperParams):
        self.hps = hps
        self.net = getattr(models, hps.net)(hps)
        self.bce_loss = nn.BCELoss()
        self.mse_loss = nn.MSELoss()
        self.optimizer = None  # type: optim.Optimizer
        self.tb_logger = None  # type: tensorboard_logger.Logger
        self.logdir = None  # type: Path
        self.on_gpu = torch.cuda.is_available()
        if self.on_gpu:
            self.net.cuda()
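
The optimizer slot starts as None and is presumably filled once the net and hyperparameters are final. A hedged sketch of such deferred construction; nn.Linear stands in for the real net, and the lr value is an assumption (the snippet does not show where hps stores it):

import torch.nn as nn
import torch.optim as optim

net = nn.Linear(4, 1)                            # stand-in for getattr(models, hps.net)(hps)
lr = 1e-4                                        # stand-in for a learning-rate hyperparameter
optimizer = optim.Adam(net.parameters(), lr=lr)  # later assigned to self.optimizer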
Project: pytorch-planet-amazon    Author: rwightman
# requires: from torch.optim import Optimizer
def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
                 verbose=False, threshold=1e-4, threshold_mode='rel',
                 cooldown=0, min_lr=0, eps=1e-8):

        if factor >= 1.0:
            raise ValueError('Factor should be < 1.0.')
        self.factor = factor

        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer

        if isinstance(min_lr, (list, tuple)):
            if len(min_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} min_lrs, got {}".format(
                    len(optimizer.param_groups), len(min_lr)))
            self.min_lrs = list(min_lr)
        else:
            self.min_lrs = [min_lr] * len(optimizer.param_groups)

        self.patience = patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0
        self.mode = mode
        self.threshold = threshold
        self.threshold_mode = threshold_mode
        self.best = None
        self.num_bad_epochs = None
        self.mode_worse = None  # the worse value for the chosen mode
        self.is_better = None
        self.eps = eps
        self.last_epoch = -1
        self._init_is_better(mode=mode, threshold=threshold,
                             threshold_mode=threshold_mode)
        self._reset()
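
This constructor mirrors torch.optim.lr_scheduler.ReduceLROnPlateau. The typical driving loop feeds it a validation metric each epoch; the metric values below are invented:

import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau

optimizer = optim.SGD(nn.Linear(4, 1).parameters(), lr=0.1)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10)

for epoch in range(20):
    val_loss = 1.0               # stand-in; normally computed on a validation set
    scheduler.step(val_loss)     # after `patience` epochs without improvement, lr *= factor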