import logging
import math

import torch
from torch.optim.optimizer import Optimizer

__all__ = ('Lamb',)

logger = logging.getLogger(__name__)
class Lamb(Optimizer):
    r"""Implements the LAMB algorithm (layer-wise adaptive large batch
    optimization), proposed in 'Large Batch Optimization for Deep Learning:
    Training BERT in 76 minutes' (https://arxiv.org/abs/1904.00962).

    ``clamp_value`` bounds the weight norm used in the trust ratio,
    ``adam`` forces a trust ratio of 1 (plain Adam-style update), and
    ``debias`` applies Adam-style bias correction to the learning rate.
    """

    def __init__(
        self,
        params,
        lr: float = 1e-3,
        betas=(0.9, 0.999),
        eps: float = 1e-6,
        weight_decay: float = 0,
        clamp_value: float = 10,
        adam: bool = False,
        debias: bool = False,
    ) -> None:
        if lr <= 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if eps < 0.0:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                'Invalid beta parameter at index 0: {}'.format(betas[0])
            )
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                'Invalid beta parameter at index 1: {}'.format(betas[1])
            )
        if weight_decay < 0:
            raise ValueError(
                'Invalid weight_decay value: {}'.format(weight_decay)
            )
        if clamp_value < 0.0:
            raise ValueError('Invalid clamp value: {}'.format(clamp_value))

        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.clamp_value = clamp_value
        self.adam = adam
        self.debias = debias

        super(Lamb, self).__init__(params, defaults)
    def step(self, closure=None):
        r"""Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the
                loss (see the usage sketch at the bottom of this module).
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    msg = (
                        'Lamb does not support sparse gradients, '
                        'please consider SparseAdam instead'
                    )
                    raise RuntimeError(msg)

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient
                # m_t
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # v_t
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # Adam-style debiasing; the LAMB paper (v3) does not use it.
                # The correction is folded into lr to avoid a broadcast.
                if self.debias:
                    bias_correction = math.sqrt(1 - beta2 ** state['step'])
                    bias_correction /= 1 - beta1 ** state['step']
                else:
                    bias_correction = 1

                step_size = group['lr'] * bias_correction

                weight_norm = torch.norm(p.data).clamp(0, self.clamp_value)

                adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
                adam_step.add_(p.data, alpha=group['weight_decay'])

                adam_norm = torch.norm(adam_step).clamp_min(0.001)
                trust_ratio = weight_norm / adam_norm
                # Expose layer-wise statistics for logging/debugging.
                state['weight_norm'] = weight_norm
                state['adam_norm'] = adam_norm
                state['trust_ratio'] = trust_ratio

                if self.adam:
                    trust_ratio = 1

                p.data.add_(adam_step, alpha=-step_size * trust_ratio)

        return loss
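
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the optimizer). The
# `model`, `criterion`, and random batch below are placeholders, chosen just
# to show how Lamb plugs into a standard PyTorch training step, how the
# optional `closure` argument of `step()` can be used, and how the layer-wise
# statistics stored in `state` can be read back afterwards.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    torch.manual_seed(0)

    model = torch.nn.Linear(10, 2)      # placeholder model
    criterion = torch.nn.MSELoss()
    optimizer = Lamb(model.parameters(), lr=1e-3, weight_decay=0.01)

    inputs = torch.randn(4, 10)         # placeholder batch
    targets = torch.randn(4, 2)

    def closure():
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        return loss

    loss = optimizer.step(closure)
    print('loss: {:.4f}'.format(loss.item()))

    # Per-parameter trust ratios computed during the step.
    for p in model.parameters():
        stats = optimizer.state[p]
        print(
            'weight_norm={:.4f} adam_norm={:.4f} trust_ratio={:.4f}'.format(
                stats['weight_norm'].item(),
                stats['adam_norm'].item(),
                stats['trust_ratio'].item(),
            )
        )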