fused_adam.py

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from fused adam in NVIDIA/apex, commit 6bd01c4
"""
import torch

from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import FusedAdamBuilder

from .multi_tensor_apply import MultiTensorApply

multi_tensor_applier = MultiTensorApply(2048 * 32)
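# NOTE (added comment): 2048 * 32 is the chunk size in elements. MultiTensorApply
# splits the flattened tensor lists into chunks of this size so that updates for
# all parameters land in one or a few fused kernel launches.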


class FusedAdam(torch.optim.Optimizer):
    """Implements Adam algorithm.

    Currently GPU-only. This version is adapted from ``apex.optimizers.FusedAdam``;
    in DeepSpeed the underlying fused kernels are built and loaded through
    ``FusedAdamBuilder``, so a separate Apex installation
    (``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``)
    is not required.

    This version of fused Adam implements two fusions.

      * Fusion of the Adam update's elementwise operations
      * A multi-tensor apply launch that batches the elementwise updates applied to
        all the model's parameters into one or a few kernel launches.

    :class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for
    ``torch.optim.AdamW``, or ``torch.optim.Adam`` with ``adam_w_mode=False``::

        opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
        ...
        opt.step()

    :class:`apex.optimizers.FusedAdam` may be used with or without Amp. If you wish
    to use :class:`FusedAdam` with Amp, you may choose any ``opt_level``::

        opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
        model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
        ...
        opt.step()

    In general, ``opt_level="O1"`` is recommended.

    .. warning::
        A previous version of :class:`FusedAdam` allowed a number of additional
        arguments to ``step``. These additional arguments are now deprecated and
        unnecessary.

    Adam was proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False) NOT SUPPORTED in FusedAdam!
        adam_w_mode (boolean, optional): whether to apply decoupled weight decay
            (also known as AdamW) instead of L2 regularization. (default: True)
        set_grad_none (bool, optional): whether to set grad to None when
            ``zero_grad()`` is called. (default: True)

    .. _Adam - A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self,
                 params,
                 lr=1e-3,
                 bias_correction=True,
                 betas=(0.9, 0.999),
                 eps=1e-8,
                 adam_w_mode=True,
                 weight_decay=0.,
                 amsgrad=False,
                 set_grad_none=True):

        if amsgrad:
            raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
        defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay)
        super(FusedAdam, self).__init__(params, defaults)
        self.adam_w_mode = 1 if adam_w_mode else 0
        self.set_grad_none = set_grad_none
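        # NOTE (added comment): FusedAdamBuilder().load() below compiles or loads the
        # fused Adam extension for the current accelerator; the first call may
        # JIT-build it if no prebuilt op is available.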
        fused_adam_cuda = FusedAdamBuilder().load()
        # Skip buffer: a nonzero value signals the fused kernel to skip the update
        # (used by overflow checks when loss scaling is active).
        self._dummy_overflow_buf = get_accelerator().IntTensor([0])
        self.multi_tensor_adam = fused_adam_cuda.multi_tensor_adam

    def zero_grad(self):
        if self.set_grad_none:
            for group in self.param_groups:
                for p in group['params']:
                    p.grad = None
        else:
            super(FusedAdam, self).zero_grad()
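    # NOTE on zero_grad above (added comment): setting ``p.grad = None`` frees the
    # gradient tensors and avoids the fill kernel that in-place zeroing would launch,
    # matching ``torch.optim.Optimizer.zero_grad(set_to_none=True)`` semantics.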

    def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
        """
        if any(p is not None for p in [grads, output_params, scale, grad_norms]):
            raise RuntimeError(
                'FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.'
            )

        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            if len(group['params']) == 0:
                continue
            bias_correction = 1 if group['bias_correction'] else 0
            beta1, beta2 = group['betas']

            # Assume the same step across the group for now to simplify things.
            # A per-parameter step could be supported by making it a tensor, or by
            # passing a list into the kernel.
            if 'step' not in group:
                group['step'] = 0
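
            # Update rule applied elementwise by the fused kernel (added comment;
            # a sketch only — the exact math lives in the compiled multi_tensor_adam op):
            #   m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
            #   v_t = beta2 * v_{t-1} + (1 - beta2) * g_t ** 2
            #   with bias_correction: m_hat = m_t / (1 - beta1 ** t), v_hat = v_t / (1 - beta2 ** t)
            #   adam_w_mode=1 (AdamW): p_t = p_{t-1} - lr * (m_hat / (sqrt(v_hat) + eps) + weight_decay * p_{t-1})
            #   adam_w_mode=0 (L2):    g_t is replaced by g_t + weight_decay * p_{t-1} before the moment updates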

            # Create lists for multi-tensor apply.
            g_16, p_16, m_16, v_16 = [], [], [], []
            g_bf, p_bf, m_bf, v_bf = [], [], [], []
            g_32, p_32, m_32, v_32 = [], [], [], []

            for p in group['params']:
                if p.grad is None:
                    continue
                if p.grad.data.is_sparse:
                    raise RuntimeError(
                        'FusedAdam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    # DeepSpeed ZeRO 3 processes one subgroup at a time, so we need to
                    # track the step count for each tensor separately. This is not an
                    # issue for ZeRO 1 & 2, since they apply a single optimization step
                    # to the whole param group at once. To keep backward compatibility
                    # with existing checkpoints, we use group['step'] to initialize
                    # state['step'] if it exists.
                    state['step'] = group.get('step', 0)
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                if p.dtype == torch.float16:
                    g_16.append(p.grad.data)
                    p_16.append(p.data)
                    m_16.append(state['exp_avg'])
                    v_16.append(state['exp_avg_sq'])
                elif p.dtype == torch.bfloat16:
                    g_bf.append(p.grad)
                    p_bf.append(p)
                    m_bf.append(state['exp_avg'])
                    v_bf.append(state['exp_avg_sq'])
                elif p.dtype == torch.float32:
                    g_32.append(p.grad.data)
                    p_32.append(p.data)
                    m_32.append(state['exp_avg'])
                    v_32.append(state['exp_avg_sq'])
                else:
                    raise RuntimeError('FusedAdam only supports fp16, bf16 and fp32.')
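
            # NOTE (added comments): parameters were bucketed by dtype above because each
            # multi_tensor_adam launch requires tensor lists of a single, uniform dtype.
            # Below, `state` refers to the state of the last parameter visited in the
            # loop above; the step count is assumed uniform across the whole group
            # (see the comment near the top of this loop).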
            if len(g_16) > 0:
                state['step'] += 1
                multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16],
                                     group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
                                     bias_correction, group['weight_decay'])

            if len(g_bf) > 0:
                state['step'] += 1
                multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_bf, p_bf, m_bf, v_bf],
                                     group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
                                     bias_correction, group['weight_decay'])

            if len(g_32) > 0:
                state['step'] += 1
                multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32],
                                     group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
                                     bias_correction, group['weight_decay'])

        return loss
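

# --- Minimal usage sketch (editor's addition, not part of the upstream file). ---
# Assumes a CUDA-capable accelerator and that the FusedAdam op builds successfully.
if __name__ == "__main__":
    device = get_accelerator().device_name()  # e.g. 'cuda'
    model = torch.nn.Linear(4, 2).to(device).half()  # fp16 params exercise the g_16 bucket
    opt = FusedAdam(model.parameters(), lr=1e-3, adam_w_mode=True, weight_decay=0.01)
    x = torch.randn(8, 4, device=device, dtype=torch.float16)
    loss = model(x).float().pow(2).mean()
    loss.backward()
    opt.step()       # one fused multi-tensor launch per dtype bucket
    opt.zero_grad()  # sets grads to None (set_grad_none=True by default)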