# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""

import math

import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
#from fused_adam_local import FusedAdam
from apex.optimizers import FusedAdam
from apex.multi_tensor_apply import multi_tensor_applier
import amp_C
from utils import is_main_process

multi_tensor_l2norm = amp_C.multi_tensor_l2norm
lamb_compute_update = amp_C.multi_tensor_lamb_stage1_cuda
lamb_apply_update = amp_C.multi_tensor_lamb_stage2_cuda
scale = amp_C.multi_tensor_scale


def warmup_cosine(x, warmup=0.002):
    """Cosine decay after a linear warmup over the first `warmup` fraction of training."""
    if x < warmup:
        return x / warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))


def warmup_constant(x, warmup=0.002):
    """Constant rate after a linear warmup over the first `warmup` fraction of training."""
    if x < warmup:
        return x / warmup
    return 1.0


def warmup_linear(x, warmup=0.002):
    """Linear decay to zero after a linear warmup over the first `warmup` fraction of training."""
    if x < warmup:
        return x / warmup
    return max((x - 1.) / (warmup - 1.), 0.)


def warmup_poly(x, warmup=0.002, degree=0.5):
    """Polynomial decay with exponent `degree` after a linear warmup over the first `warmup` fraction."""
    if x < warmup:
        return x / warmup
    return (1.0 - x) ** degree


SCHEDULES = {
    'warmup_cosine': warmup_cosine,
    'warmup_constant': warmup_constant,
    'warmup_linear': warmup_linear,
    'warmup_poly': warmup_poly,
}
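
# Example: each schedule maps training progress x = global_step / t_total to a
# multiplier on the base learning rate. With warmup=0.01 and a base LR of 4e-5
# (illustrative values only), 'warmup_linear' yields
#     4e-5 * warmup_linear(0.005, warmup=0.01) == 2e-5      (halfway through warmup)
#     4e-5 * warmup_linear(0.5,   warmup=0.01) ~= 2.02e-5   (decaying linearly toward 0 at x = 1)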


class BertAdam(Optimizer):
    """Implements the BERT version of the Adam algorithm with the weight decay fix.

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        b1: Adam's b1 (first-moment decay rate). Default: 0.9
        b2: Adam's b2 (second-moment decay rate). Default: 0.999
        e: Adam's epsilon. Default: 1e-6
        weight_decay: weight decay. Default: 0.01
        max_grad_norm: maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """

    def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
                 max_grad_norm=1.0):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0) or -1".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0)".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0)".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)

    def get_lr(self):
        """Returns the current scheduled learning rate for each parameter (a flat list across groups)."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    # No step has been taken yet, so the schedule has not started.
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step'] / group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr
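
    # Example (illustrative only, not used elsewhere in this module): the scheduled
    # rate can be read back for logging during training, e.g.
    #     current_lr = optimizer.get_lr()[0]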

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['next_m'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['next_v'] = torch.zeros_like(p.data)

                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['b1'], group['b2']

                # Add grad clipping
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'], error_if_nonfinite=False)

                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
                next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                update = next_m / (next_v.sqrt() + group['e'])

                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data

                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step'] / group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']

                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)

                state['step'] += 1

        return loss
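
# Typical usage (illustrative sketch only; `model` and `num_train_steps` are placeholders,
# and the parameter grouping below is an assumption about the caller, not something this
# module enforces): biases and LayerNorm weights are usually excluded from weight decay
# when fine-tuning BERT.
#
#     no_decay = ['bias', 'LayerNorm.weight']
#     grouped_parameters = [
#         {'params': [p for n, p in model.named_parameters()
#                     if not any(nd in n for nd in no_decay)],
#          'weight_decay': 0.01},
#         {'params': [p for n, p in model.named_parameters()
#                     if any(nd in n for nd in no_decay)],
#          'weight_decay': 0.0},
#     ]
#     optimizer = BertAdam(grouped_parameters, lr=3e-5, warmup=0.1,
#                          t_total=num_train_steps, schedule='warmup_linear')
#
#     loss = model(batch)          # forward pass returning a scalar loss (placeholder)
#     loss.backward()
#     optimizer.step()
#     optimizer.zero_grad()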