#pragma once

#define NOMINMAX  // Windows idiosyncrasy
// https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c

#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <cassert>
#include <cmath>  // std::pow
#include "cuda.h"
#include "custom_cuda_layers.h"
#include "simd.h"
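
// Declares Step_1 / Step_4 / Step_8: optimizer step entry points where SPAN
// selects the SIMD unroll factor used by the vectorized path. When dev_param
// is non-null, the updated parameters are also copied to that device buffer.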
#define STEP(SPAN)                                \
    void Step_##SPAN(float* _params,              \
                     float* grads,                 \
                     float* _exp_avg,              \
                     float* _exp_avg_sq,           \
                     size_t _param_size,           \
                     __half* dev_param = nullptr,  \
                     bool half_precision = false);
class Adam_Optimizer {
public:
    Adam_Optimizer(float alpha = 1e-3,
                   float betta1 = 0.9,
                   float betta2 = 0.999,
                   float eps = 1e-8,
                   float weight_decay = 0,
                   bool adamw_mode = true)
        : _alpha(alpha),
          _betta1(betta1),
          _betta2(betta2),
          _eps(eps),
          _weight_decay(weight_decay),
          _betta1_t(1.0),
          _betta2_t(1.0),
          _step(0),
          _buf_index(false),
          _adamw_mode(adamw_mode)
    {
        // Pinned host staging buffers, double-buffered so device copies can
        // overlap with the next tile's computation.
        cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float));
        cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float));

        _streams[0] = Context::Instance().GetCurrentStream();
        _streams[1] = Context::Instance().GetNewStream();
    }
    ~Adam_Optimizer()
    {
        cudaFreeHost(_doubled_buffer[0]);
        cudaFreeHost(_doubled_buffer[1]);
    }
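
    // Vectorized Adam step, compiled only when AVX-512 or AVX2 intrinsics are
    // available. Writes the number of elements it handled to rounded_size so
    // the scalar Step_* path can finish the unaligned tail.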
#if defined(__AVX512__) or defined(__AVX256__)
    template <int span>
    void Step_AVX(size_t* rounded_size,
                  float* _params,
                  float* grads,
                  float* _exp_avg,
                  float* _exp_avg_sq,
                  size_t param_size,
                  __half* dev_param = nullptr,
                  bool half_precision = false);
#endif
    STEP(1)
    STEP(4)
    STEP(8)
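
    // Block until both copy streams have drained, so the pinned staging
    // buffers can be reused or freed safely.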
    inline void SynchronizeStreams()
    {
        for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]);
    }
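
    // Track beta1^t and beta2^t incrementally; recompute them from scratch
    // only when the step count jumps or the betas themselves change.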
    inline void IncrementStep(size_t step, float beta1, float beta2)
    {
        if (beta1 != _betta1 || beta2 != _betta2) {
            _step = step;
            _betta1 = beta1;
            _betta2 = beta2;
            _betta1_t = std::pow(_betta1, step);
            _betta2_t = std::pow(_betta2, step);
        } else {
            _step++;
            if (_step != step) {
                _betta1_t = std::pow(_betta1, step);
                _betta2_t = std::pow(_betta2, step);
                _step = step;
            } else {
                _betta1_t *= _betta1;
                _betta2_t *= _betta2;
            }
        }
    }
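
    // Refresh hyperparameters for the next step. With bias correction on, the
    // step size becomes alpha / (1 - beta1^t) and sqrt(v) is scaled by
    // 1 / sqrt(1 - beta2^t), matching the standard Adam update.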
    inline void update_state(float lr, float epsilon, float weight_decay, bool bias_correction)
    {
        _alpha = lr;
        _eps = epsilon;
        _weight_decay = weight_decay;

        _bias_correction1 = 1.0f;
        _bias_correction2 = 1.0f;
        if (bias_correction) {
            _bias_correction1 = 1 - _betta1_t;
            _bias_correction2 = 1 / sqrt(1 - _betta2_t);
        }
    }
private:
    float _alpha;
    float _betta1;
    float _betta2;
    float _eps;
    float _weight_decay;

    float _betta1_t;
    float _betta2_t;
    size_t _step;

    float _bias_correction1;
    float _bias_correction2;

    float* _doubled_buffer[2];
    bool _buf_index;
    bool _adamw_mode;

    cudaStream_t _streams[2];
};
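
// Minimal usage sketch (hypothetical driver code; TILE, Context, and the
// launch_param_update* kernels are provided by the surrounding sources):
//
//   Adam_Optimizer opt(/*alpha=*/1e-3f, /*betta1=*/0.9f, /*betta2=*/0.999f,
//                      /*eps=*/1e-8f, /*weight_decay=*/0.01f, /*adamw_mode=*/true);
//   for (size_t step = 1; step <= num_steps; step++) {
//       opt.IncrementStep(step, 0.9f, 0.999f);
//       opt.update_state(1e-3f, 1e-8f, 0.01f, /*bias_correction=*/true);
//       opt.Step_8(params, grads, exp_avg, exp_avg_sq, param_count);
//   }
//   opt.SynchronizeStreams();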
#if defined(__AVX512__) or defined(__AVX256__)
template <int span>
void Adam_Optimizer::Step_AVX(size_t* rounded_size,
                              float* _params,
                              float* grads,
                              float* _exp_avg,
                              float* _exp_avg_sq,
                              size_t _param_size,
                              __half* dev_params,
                              bool half_precision)
{
    size_t new_rounded_size = 0;

    AVX_Data betta1_4;
    betta1_4.data = SIMD_SET(_betta1);
    AVX_Data betta2_4;
    betta2_4.data = SIMD_SET(_betta2);

    float betta1_minus1 = 1 - _betta1;
    float betta2_minus1 = 1 - _betta2;
    AVX_Data betta1_minus1_4;
    betta1_minus1_4.data = SIMD_SET(betta1_minus1);
    AVX_Data betta2_minus1_4;
    betta2_minus1_4.data = SIMD_SET(betta2_minus1);

    AVX_Data bias2_sqrt;
    bias2_sqrt.data = SIMD_SET(_bias_correction2);
    AVX_Data eps_4;
    eps_4.data = SIMD_SET(_eps);

    float step_size = -1 * _alpha / _bias_correction1;
    AVX_Data step_size_4;
    step_size_4.data = SIMD_SET(step_size);

    float w_decay = -1 * _alpha * _weight_decay;
    AVX_Data weight_decay4;
    if (_weight_decay > 0)
        weight_decay4.data = (_adamw_mode ? SIMD_SET(w_decay) : SIMD_SET(_weight_decay));

    new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span);
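
    // Walk the rounded region in TILE-sized chunks so each chunk can be
    // staged in pinned memory and copied to the device asynchronously.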
    for (size_t t = 0; t < new_rounded_size; t += TILE) {
        size_t copy_size = TILE;
        if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t;
        size_t offset = copy_size + t;
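        // Two staging buffers are in flight; from the third tile onward, wait
        // for the earlier copy issued from this buffer before overwriting it.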
        if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); }
#pragma omp parallel for
        for (size_t i = t; i < offset; i += SIMD_WIDTH * span) {
            AVX_Data grad_4[span];
            simd_load<span>(grad_4, grads + i, half_precision);

            AVX_Data momentum_4[span];
            simd_load<span>(momentum_4, _exp_avg + i, false);

            AVX_Data variance_4[span];
            simd_load<span>(variance_4, _exp_avg_sq + i, false);

            AVX_Data param_4[span];
            simd_load<span>(param_4, _params + i, half_precision);
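
            // Classic Adam folds L2 weight decay into the gradient; AdamW
            // (below) decays the parameters directly. The update is:
            //   m = beta1*m + (1-beta1)*g
            //   v = beta2*v + (1-beta2)*g*g
            //   p -= alpha/(1-beta1^t) * m / (sqrt(v)/sqrt(1-beta2^t) + eps)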
            if (_weight_decay > 0 && !_adamw_mode) {
                simd_fma<span>(grad_4, param_4, weight_decay4, grad_4);
            }

            simd_mul<span>(momentum_4, momentum_4, betta1_4);
            simd_fma<span>(momentum_4, grad_4, betta1_minus1_4, momentum_4);
            simd_mul<span>(variance_4, variance_4, betta2_4);
            simd_mul<span>(grad_4, grad_4, grad_4);
            simd_fma<span>(variance_4, grad_4, betta2_minus1_4, variance_4);
            simd_sqrt<span>(grad_4, variance_4);
            simd_fma<span>(grad_4, grad_4, bias2_sqrt, eps_4);
            simd_div<span>(grad_4, momentum_4, grad_4);

            if (_weight_decay > 0 && _adamw_mode) {
                simd_fma<span>(param_4, param_4, weight_decay4, param_4);
            }
            simd_fma<span>(param_4, grad_4, step_size_4, param_4);

            simd_store<span>(_params + i, param_4, half_precision);
            if (dev_params) {
                simd_store<span>(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision);
            }
            simd_store<span>(_exp_avg + i, momentum_4, false);
            simd_store<span>(_exp_avg_sq + i, variance_4, false);
        }
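
        // Launch the async copy of this tile to the device on the buffer's
        // stream, then flip to the other staging buffer.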
        if (dev_params) {
            if (half_precision)
                launch_param_update_half(
                    _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
            else
                launch_param_update(
                    _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);

            _buf_index = !_buf_index;
        }
    }
    *rounded_size = new_rounded_size;
}
#endif