# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

#############################################
# Routes
#############################################
ROUTE_TRAIN = "train"
ROUTE_EVAL = "eval"
ROUTE_PREDICT = "predict"
ROUTE_ENCODE = "encode"

#############################################
# Batch size
#############################################
TRAIN_BATCH_SIZE = "train_batch_size"
TRAIN_BATCH_SIZE_DEFAULT = None

#############################################
# Sparse attention
#############################################
SPARSE_ATTENTION = "sparse_attention"
SPARSE_DENSE_MODE = "dense"
SPARSE_FIXED_MODE = "fixed"
SPARSE_VARIABLE_MODE = "variable"
SPARSE_BIGBIRD_MODE = "bigbird"
SPARSE_BSLONGFORMER_MODE = "bslongformer"
SPARSE_MODE = "mode"
SPARSE_MODE_DEFAULT = SPARSE_FIXED_MODE
SPARSE_BLOCK = "block"
SPARSE_BLOCK_DEFAULT = 16
SPARSE_DIFFERENT_LAYOUT_PER_HEAD = "different_layout_per_head"
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT = False
SPARSE_NUM_LOCAL_BLOCKS = "num_local_blocks"
SPARSE_NUM_LOCAL_BLOCKS_DEFAULT = 4
SPARSE_NUM_GLOBAL_BLOCKS = "num_global_blocks"
SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT = 1
SPARSE_ATTENTION_TYPE = "attention"
SPARSE_ATTENTION_TYPE_DEFAULT = "bidirectional"
SPARSE_HORIZONTAL_GLOBAL_ATTENTION = "horizontal_global_attention"
SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT = False
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS = "num_different_global_patterns"
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS_DEFAULT = 1
SPARSE_NUM_RANDOM_BLOCKS = "num_random_blocks"
SPARSE_NUM_RANDOM_BLOCKS_DEFAULT = 0
SPARSE_LOCAL_WINDOW_BLOCKS = "local_window_blocks"
SPARSE_LOCAL_WINDOW_BLOCKS_DEFAULT = [4]
SPARSE_GLOBAL_BLOCK_INDICES = "global_block_indices"
SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT = [0]
SPARSE_GLOBAL_BLOCK_END_INDICES = "global_block_end_indices"
SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT = None
SPARSE_NUM_SLIDING_WINDOW_BLOCKS = "num_sliding_window_blocks"
SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT = 3
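
# Illustrative only (not part of the original file): a "sparse_attention"
# section of ds_config.json built from the keys and defaults above. The values
# shown are the defaults for the "fixed" mode; treat the combination as an
# example, not a recommendation.
#
#   "sparse_attention": {
#     "mode": "fixed",
#     "block": 16,
#     "different_layout_per_head": false,
#     "num_local_blocks": 4,
#     "num_global_blocks": 1,
#     "attention": "bidirectional",
#     "horizontal_global_attention": false,
#     "num_different_global_patterns": 1
#   }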

#############################################
# Optimizer and lr scheduler
#############################################
OPTIMIZER = "optimizer"
OPTIMIZER_TYPE_DEFAULT = None
OPTIMIZER_PARAMS = "params"
TYPE = "type"
LEGACY_FUSION = "legacy_fusion"
LEGACY_FUSION_DEFAULT = False
SCHEDULER = "scheduler"
SCHEDULER_TYPE_DEFAULT = None
SCHEDULER_PARAMS = "params"
MAX_GRAD_NORM = 'max_grad_norm'
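
# Illustrative only (not part of the original file): how the optimizer and
# scheduler keys above typically appear in ds_config.json. The "Adam" /
# "WarmupLR" types and the parameter values are example choices, not defaults
# defined here.
#
#   "optimizer": {
#     "type": "Adam",
#     "params": {"lr": 0.001}
#   },
#   "scheduler": {
#     "type": "WarmupLR",
#     "params": {"warmup_num_steps": 1000}
#   }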

#############################################
# ZeRO optimizer compatibility
#############################################
ZERO_ALLOW_UNTESTED_OPTIMIZER = "zero_allow_untested_optimizer"
ZERO_ALLOW_UNTESTED_OPTIMIZER_DEFAULT = False
ZERO_FORCE_DS_CPU_OPTIMIZER = "zero_force_ds_cpu_optimizer"
ZERO_FORCE_DS_CPU_OPTIMIZER_DEFAULT = True

# Steps
STEPS_PER_PRINT = "steps_per_print"
STEPS_PER_PRINT_DEFAULT = 10

#########################################
# Training micro batch size per GPU
#########################################
# Batch size for one training step. This is used when TRAIN_BATCH_SIZE
# cannot fit in GPU memory to determine the number of gradient
# accumulation steps. By default, this is set to None.
# Users can configure it in ds_config.json as in the example below:
TRAIN_MICRO_BATCH_SIZE_PER_GPU_FORMAT = '''
TRAIN_MICRO_BATCH_SIZE_PER_GPU is defined in this format:
"train_micro_batch_size_per_gpu": 1
'''
TRAIN_MICRO_BATCH_SIZE_PER_GPU = "train_micro_batch_size_per_gpu"
TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = None

#########################################
# Gradient Accumulation
#########################################
# Gradient accumulation feature. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
GRADIENT_ACCUMULATION_FORMAT = '''
Gradient Accumulation should be of the format:
"gradient_accumulation_steps": 1
'''
GRADIENT_ACCUMULATION_STEPS = "gradient_accumulation_steps"
GRADIENT_ACCUMULATION_STEPS_DEFAULT = None
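
# Illustrative only (not part of the original file): the batch-size keys above
# are related: the effective train_batch_size is the per-GPU micro batch size
# times the gradient accumulation steps times the data-parallel world size. A
# minimal lookup sketch, assuming `ds_config` is a dict parsed from
# ds_config.json and `world_size` is the data-parallel world size:
#
#   micro_batch = ds_config.get(TRAIN_MICRO_BATCH_SIZE_PER_GPU,
#                               TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT)
#   accum_steps = ds_config.get(GRADIENT_ACCUMULATION_STEPS,
#                               GRADIENT_ACCUMULATION_STEPS_DEFAULT)
#   # e.g. 2 (micro batch) * 8 (accumulation steps) * 4 (GPUs) -> 64
#   train_batch_size = micro_batch * accum_steps * world_size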

# DeepSpeed CSR gradient sparsity
SPARSE_GRADIENTS = "sparse_gradients"
SPARSE_GRADIENTS_DEFAULT = False

#########################################
# BFLOAT16 support
#########################################
# BFLOAT16 feature. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
BFLOAT16_FORMAT = '''
BFLOAT16 parameters should be of the format:
"bf16": {
  "enabled": true
}
'''
BFLOAT16 = "bf16"
BFLOAT16_OLD = "bfloat16"  # keeping for backwards compatibility
BFLOAT16_ENABLED = "enabled"
BFLOAT16_ENABLED_DEFAULT = False

#########################################
# FP16 support
#########################################
# FP16 feature. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
FP16_FORMAT = '''
FP16 parameters should be of the format:
"fp16": {
  "enabled": true,
  "auto_cast": false,
  "loss_scale": 0,
  "initial_scale_power": 16,
  "loss_scale_window": 1000,
  "hysteresis": 2,
  "consecutive_hysteresis": false,
  "min_loss_scale": 1
}
'''
FP16 = "fp16"
FP16_ENABLED = "enabled"
FP16_ENABLED_DEFAULT = False
# FP16 loss scale, zero means using dynamic scaling
FP16_LOSS_SCALE = "loss_scale"
FP16_LOSS_SCALE_DEFAULT = 0
FP16_AUTO_CAST = "auto_cast"
FP16_AUTO_CAST_DEFAULT = False
# FP16 initial dynamic loss scale power
FP16_INITIAL_SCALE_POWER = "initial_scale_power"
FP16_INITIAL_SCALE_POWER_DEFAULT = 16
# FP16 loss scale window
FP16_LOSS_SCALE_WINDOW = "loss_scale_window"
FP16_LOSS_SCALE_WINDOW_DEFAULT = 1000
# FP16 hysteresis
FP16_HYSTERESIS = "hysteresis"
FP16_HYSTERESIS_DEFAULT = 2
# FP16 consecutive hysteresis
FP16_CONSECUTIVE_HYSTERESIS = "consecutive_hysteresis"
FP16_CONSECUTIVE_HYSTERESIS_DEFAULT = False
# FP16 min loss scale
FP16_MIN_LOSS_SCALE = "min_loss_scale"
FP16_MIN_LOSS_SCALE_DEFAULT = 1
# FP16 master and grads
FP16_MASTER_WEIGHTS_AND_GRADS = "fp16_master_weights_and_grads"
FP16_MASTER_WEIGHTS_AND_GRADS_DEFAULT = False
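
# Illustrative only (not part of the original file): with "loss_scale": 0 the
# dynamic loss scaler is used, and it starts at 2 ** initial_scale_power
# (65536 for the default of 16). A rough sketch of the usual update rule,
# stated as an assumption rather than as DeepSpeed's exact implementation:
#
#   scale = 2 ** FP16_INITIAL_SCALE_POWER_DEFAULT   # 65536
#   # on gradient overflow: halve the scale (hysteresis delays the decrease),
#   #   never going below min_loss_scale
#   # after loss_scale_window consecutive overflow-free steps: double the scale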

#########################################
# Apex AMP support
#########################################
# Use Apex AMP for mixed precision support; all parameters (other than 'enabled') will be passed to
# amp.initialize(model, optimizer, **amp_params)
# See apex documentation for supported parameters/features: https://nvidia.github.io/apex/amp.html#apex.amp.initialize
AMP_FORMAT = '''
"amp": {
  "enabled": true,
  "opt_level": "O1",
  ...
}
'''
AMP = "amp"
AMP_ENABLED = "enabled"
AMP_ENABLED_DEFAULT = False
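
# Illustrative only (not part of the original file): a hedged sketch of how an
# "amp" config section is commonly consumed, per the comment above. `ds_config`
# is a hypothetical dict parsed from ds_config.json, and apex must be installed
# for amp.initialize to exist.
#
#   amp_params = {k: v for k, v in ds_config.get(AMP, {}).items() if k != AMP_ENABLED}
#   model, optimizer = amp.initialize(model, optimizer, **amp_params)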

#########################################
# Gradient clipping
#########################################
# Gradient clipping. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
GRADIENT_CLIPPING_FORMAT = '''
Gradient clipping should be enabled as:
"gradient_clipping": 1.0
'''
GRADIENT_CLIPPING = 'gradient_clipping'
GRADIENT_CLIPPING_DEFAULT = 0.

#########################################
# Communication data type
#########################################
# Supported types: ['none', 'fp16', 'fp32']
# By default, this feature is not enabled ('none' value)
# Users can configure it in ds_config.json as in the example below:
COMMUNICATION_DATA_TYPE_FORMAT = '''
Communication data type should be set as:
"communication_data_type": "fp32"
'''
COMMUNICATION_DATA_TYPE = "communication_data_type"
COMMUNICATION_DATA_TYPE_DEFAULT = None

#########################################
# Scale/predivide gradients before allreduce
#########################################
# Prescale gradients. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
PRESCALE_GRADIENTS_FORMAT = '''
Gradient prescaling should be enabled as:
"prescale_gradients": true
'''
PRESCALE_GRADIENTS = "prescale_gradients"
PRESCALE_GRADIENTS_DEFAULT = False
GRADIENT_PREDIVIDE_FACTOR_FORMAT = '''
Gradient predivide factor should be enabled as:
"gradient_predivide_factor": 1.0
'''
GRADIENT_PREDIVIDE_FACTOR = "gradient_predivide_factor"
GRADIENT_PREDIVIDE_FACTOR_DEFAULT = 1.0

#########################################
# Disable AllGather
#########################################
# Disable AllGather. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
DISABLE_ALLGATHER_FORMAT = '''
Disable AllGather should be enabled as:
"disable_allgather": true
'''
DISABLE_ALLGATHER = "disable_allgather"
DISABLE_ALLGATHER_DEFAULT = False

#########################################
# Dump DeepSpeed state
#########################################
# Dump State. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
DUMP_STATE_FORMAT = '''
Dump state should be enabled as:
"dump_state": true
'''
DUMP_STATE = 'dump_state'
DUMP_STATE_DEFAULT = False

#########################################
# Vocabulary size
#########################################
# Vocabulary size.
# Users can configure it in ds_config.json as in the example below:
VOCABULARY_SIZE_FORMAT = '''
Vocabulary size can be specified as:
"vocabulary_size": 1024
'''
VOCABULARY_SIZE = 'vocabulary_size'
VOCABULARY_SIZE_DEFAULT = None

#########################################
# Wall clock breakdown
#########################################
# Wall clock breakdown. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
WALL_CLOCK_BREAKDOWN_FORMAT = '''
Wall clock breakdown should be enabled as:
"wall_clock_breakdown": true
'''
WALL_CLOCK_BREAKDOWN = 'wall_clock_breakdown'
WALL_CLOCK_BREAKDOWN_DEFAULT = False
MEMORY_BREAKDOWN = 'memory_breakdown'
MEMORY_BREAKDOWN_DEFAULT = False

#########################################
# Eigenvalue
#########################################
# Eigenvalue computation. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
EIGENVALUE_FORMAT = '''
Eigenvalue computation can be specified as:
"eigenvalue": {
  "enabled": true,
  "verbose": true,
  "max_iter": 100,
  "tol": 1e-2,
  "stability": 1e-6
}
'''
EIGENVALUE = "eigenvalue"
# Eigenvalue enable signal
EIGENVALUE_ENABLED = "enabled"
EIGENVALUE_ENABLED_DEFAULT = False
EIGENVALUE_VERBOSE = "verbose"
EIGENVALUE_VERBOSE_DEFAULT = False
EIGENVALUE_MAX_ITER = "max_iter"
EIGENVALUE_MAX_ITER_DEFAULT = 100
EIGENVALUE_TOL = "tol"
EIGENVALUE_TOL_DEFAULT = 1e-2
EIGENVALUE_STABILITY = "stability"
EIGENVALUE_STABILITY_DEFAULT = 1e-6
EIGENVALUE_GAS_BOUNDARY_RESOLUTION = "gas_boundary_resolution"
EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT = 1
EIGENVALUE_LAYER_NAME = "layer_name"
EIGENVALUE_LAYER_NAME_DEFAULT = "bert.encoder.layer"
EIGENVALUE_LAYER_NUM = "layer_num"
EIGENVALUE_LAYER_NUM_DEFAULT = 0

#########################################
# Progressive Layer Drop (PLD)
#########################################
PROGRESSIVE_LAYER_DROP = "progressive_layer_drop"
# PLD enable signal
PLD_ENABLED = "enabled"
PLD_ENABLED_DEFAULT = False
PLD_THETA = "theta"
PLD_THETA_DEFAULT = 1.0
PLD_GAMMA = "gamma"
PLD_GAMMA_DEFAULT = 0.001
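
# Illustrative only (not part of the original file): in the Progressive Layer
# Drop paper, `theta` is the lower bound of the layer keep probability and
# `gamma` controls how fast that probability decays over training steps,
# roughly keep_prob(step) = (1 - theta) * exp(-gamma * step) + theta. Treat
# this schedule as an assumption about the feature, not a definition from this
# file.
#
#   import math
#   def example_pld_keep_prob(step, theta=PLD_THETA_DEFAULT, gamma=PLD_GAMMA_DEFAULT):
#       return (1.0 - theta) * math.exp(-gamma * step) + theta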

#########################################
# Validation modes
#########################################
class ValidationMode:
    WARN = "WARN"
    IGNORE = "IGNORE"
    FAIL = "FAIL"


#########################################
# Checkpoint config params
#########################################
# "checkpoint": {
#   tag_validation=["Ignore"|"Warn"|"Fail"]
#   load_universal=false
#   use_node_local_storage=false
#   parallel_write: {
#     pipeline_stage: [True|False]
#   }
# }
CHECKPOINT = "checkpoint"
CHECKPOINT_TAG_VALIDATION = "tag_validation"
CHECKPOINT_TAG_VALIDATION_DEFAULT = ValidationMode.WARN
CHECKPOINT_TAG_VALIDATION_MODES = [ValidationMode.WARN, ValidationMode.IGNORE, ValidationMode.FAIL]
LOAD_UNIVERSAL_CHECKPOINT = "load_universal"
LOAD_UNIVERSAL_CHECKPOINT_DEFAULT = False
USE_NODE_LOCAL_STORAGE_CHECKPOINT = "use_node_local_storage"
USE_NODE_LOCAL_STORAGE_CHECKPOINT_DEFAULT = False
CHECKPOINT_PARALLEL_WRITE = "parallel_write"
CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE = "pipeline_stage"
CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT = False
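
# Illustrative only (not part of the original file): a "checkpoint" section of
# ds_config.json assembled from the keys above, with every value at its default.
#
#   "checkpoint": {
#     "tag_validation": "WARN",
#     "load_universal": false,
#     "use_node_local_storage": false,
#     "parallel_write": {
#       "pipeline_stage": false
#     }
#   }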

#########################################
# Data types config params
#########################################
# "data_types": {
#   grad_accum_dtype=["bf16"|"fp16"|"fp32"]
# }
DATA_TYPES = "data_types"
GRAD_ACCUM_DTYPE = "grad_accum_dtype"
GRAD_ACCUM_DTYPE_DEFAULT = None

#########################################
# Drop the last incomplete batch
#########################################
# dataloader_drop_last. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
DATALOADER_DROP_LAST_FORMAT = '''
The last incomplete batch can be dropped by setting:
"dataloader_drop_last": true
'''
DATALOADER_DROP_LAST = "dataloader_drop_last"
DATALOADER_DROP_LAST_DEFAULT = False

#########################################
# PIPELINE PARALLELISM
#########################################
PIPE_REPLICATED = 'ds_pipe_replicated'

#########################################
# DATA PARALLELISM
#########################################
DATA_PARALLEL_GROUP = "data_parallel_group"
GLOBAL_RANK = "global_rank"