# reshape_meg_2d.py

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .reshape_utils import partition_data


class meg_2d_parallel_map(object):
    """Maps each (pp_index, tp_index) coordinate of a 2D parallel grid to a list of global ranks."""

    def __init__(self, pp_degree, tp_degree):
        self.pp_degree = pp_degree
        self.tp_degree = tp_degree
        self.map = {}

    def simple_init(self):
        # One rank per coordinate, laid out tp-major: rank i lives at (i // tp_degree, i % tp_degree).
        self.map = {
            self._make_key(i // self.tp_degree, i % self.tp_degree): [i]
            for i in range(self.pp_degree * self.tp_degree)
        }

    def add_data(self, pp_index, tp_index, data):
        self._validate_indices(pp_index, tp_index)
        assert type(data) is list

        key = self._make_key(pp_index, tp_index)
        if key not in self.map:
            self.map[key] = []
        self.map[key] += data

    def get_data(self, pp_index=None, tp_index=None):
        # A None index means "all indices along that dimension".
        self._validate_indices(pp_index, tp_index)
        pp_indices = list(range(self.pp_degree)) if pp_index is None else [pp_index]
        tp_indices = list(range(self.tp_degree)) if tp_index is None else [tp_index]

        result = []
        for i in pp_indices:
            for j in tp_indices:
                result += self.map[self._make_key(i, j)]

        return result

    def print_data(self, tag):
        print(f'{tag}')
        for key, value in self.map.items():
            print(f'{key} = {value}')

    def _validate_indices(self, pp_index, tp_index):
        assert pp_index is None or pp_index < self.pp_degree
        assert tp_index is None or tp_index < self.tp_degree

    def _make_key(self, i, j):
        return f'{i},{j}'
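

# Illustrative sketch, not part of the original class: building the default map and
# querying it. get_data() with a None index gathers along that whole dimension.
#
#   m = meg_2d_parallel_map(pp_degree=2, tp_degree=2)
#   m.simple_init()          # m.map == {'0,0': [0], '0,1': [1], '1,0': [2], '1,1': [3]}
#   m.get_data(pp_index=0)   # [0, 1]  -- all TP ranks of pipeline stage 0
#   m.get_data(tp_index=1)   # [1, 3]  -- TP slice 1 across all pipeline stages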


def _reshape_tp_dimension(old_2d_map, new_tp_degree):
    # For each pipeline stage, repartition its ranks across the new (smaller) TP degree.
    old_pp_degree = old_2d_map.pp_degree
    new_2d_map = meg_2d_parallel_map(old_pp_degree, new_tp_degree)
    for i in range(old_pp_degree):
        ranks_for_pp_index = old_2d_map.get_data(pp_index=i, tp_index=None)
        split_ranks = partition_data(ranks_for_pp_index, new_tp_degree)
        for j in range(new_tp_degree):
            new_2d_map.add_data(i, j, split_ranks[j])

    return new_2d_map
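

# Illustrative sketch, not part of the original module: contracting the TP dimension
# of a 2x4 (pp x tp) grid to 2x2, assuming partition_data() splits a list into equal
# contiguous chunks.
#
#   old_map = meg_2d_parallel_map(pp_degree=2, tp_degree=4)
#   old_map.simple_init()    # {'0,0': [0], '0,1': [1], ..., '1,3': [7]}
#   new_map = _reshape_tp_dimension(old_map, new_tp_degree=2)
#   # new_map.map == {'0,0': [0, 1], '0,1': [2, 3], '1,0': [4, 5], '1,1': [6, 7]}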


def _reshape_pp_dimension(old_2d_map, new_pp_degree):
    # For each TP slice, repartition its ranks across the new (smaller) PP degree.
    old_tp_degree = old_2d_map.tp_degree
    new_2d_map = meg_2d_parallel_map(new_pp_degree, old_tp_degree)
    for i in range(old_tp_degree):
        ranks_for_tp_index = old_2d_map.get_data(pp_index=None, tp_index=i)
        split_ranks = partition_data(ranks_for_tp_index, new_pp_degree)
        for j in range(new_pp_degree):
            new_2d_map.add_data(j, i, split_ranks[j])

    return new_2d_map


def reshape_meg_2d_parallel(old_pp_degree, old_tp_degree, new_pp_degree, new_tp_degree, verbose=False):
    """Reshape a 2D (pp x tp) rank map; only contraction (new degrees <= old degrees) is supported."""
    assert new_pp_degree <= old_pp_degree
    assert new_tp_degree <= old_tp_degree

    old_2d_map = meg_2d_parallel_map(old_pp_degree, old_tp_degree)
    old_2d_map.simple_init()
    if verbose:
        old_2d_map.print_data('original_2d_map:')

    # Contract the TP dimension first, then the PP dimension.
    if old_tp_degree != new_tp_degree:
        new_tp_map = _reshape_tp_dimension(old_2d_map, new_tp_degree)
    else:
        new_tp_map = old_2d_map
    if verbose:
        new_tp_map.print_data('after_tp_reshape:')

    if old_pp_degree != new_pp_degree:
        final_map = _reshape_pp_dimension(new_tp_map, new_pp_degree)
    else:
        final_map = new_tp_map

    if verbose:
        final_map.print_data('final_2d_map:')

    return final_map
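

# Illustrative sketch, not part of the original module: reshaping a 2x2 (pp x tp)
# grid down to 1x1, assuming partition_data() splits a list into equal contiguous
# chunks. Every source rank folds into the single target coordinate:
#
#   final_map = reshape_meg_2d_parallel(old_pp_degree=2, old_tp_degree=2,
#                                       new_pp_degree=1, new_tp_degree=1)
#   # final_map.map == {'0,0': [0, 1, 2, 3]}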


def get_mpu_ranks(tp_size=1, pp_size=1, dp_size=1, virtual_pp_size=None):
    """
    Compute model- and data-parallel rank groups.

    Arguments:
        tp_size: number of GPUs used to parallelize the model tensor.
        pp_size: number of GPUs used to parallelize the model pipeline.
        dp_size: number of GPUs used to parallelize the model data.

    Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we
    use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
    the model pipeline. The present function will create 8 tensor
    model-parallel groups, 4 pipeline model-parallel groups and
    8 data-parallel groups as:
        8 data-parallel groups:
            [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15]
        8 tensor model-parallel groups:
            [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15]
        4 pipeline model-parallel groups:
            [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15]

    Note that for efficiency, the caller should make sure adjacent ranks
    are on the same DGX box. For example if we are using 2 DGX-1 boxes
    with a total of 16 GPUs, ranks 0 to 7 belong to the first box and
    ranks 8 to 15 belong to the second box.

    Note: the second list returned below (printed as "PP") holds the combined
    model-parallel groups (one per data-parallel index), not the pipeline groups
    listed above; the commented-out block at the end of this function shows how
    the true pipeline groups would be enumerated.
    """
    world_size = tp_size * pp_size * dp_size
    print(f"\n\n*** tp={tp_size}, pp={pp_size}, dp={dp_size}, world={world_size}")

    tensor_model_parallel_size = min(tp_size, world_size)
    pipeline_model_parallel_size = min(pp_size, world_size)
    data_parallel_size = world_size // (tensor_model_parallel_size * pipeline_model_parallel_size)

    num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
    num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size
    num_data_parallel_groups = world_size // data_parallel_size

    # Build the data-parallel groups.
    all_dp_group_ranks = []
    for i in range(pipeline_model_parallel_size):
        start_rank = i * num_pipeline_model_parallel_groups
        end_rank = (i + 1) * num_pipeline_model_parallel_groups
        for j in range(tensor_model_parallel_size):
            ranks = range(start_rank + j, end_rank, tensor_model_parallel_size)
            all_dp_group_ranks.append(list(ranks))

    print("DP", all_dp_group_ranks)

    # Build the (combined) model-parallel groups, one per data-parallel index.
    all_pp_group_ranks = []
    for i in range(data_parallel_size):
        ranks = [data_parallel_group_ranks[i] for data_parallel_group_ranks in all_dp_group_ranks]
        all_pp_group_ranks.append(list(ranks))

    print("PP", all_pp_group_ranks)

    # Build the tensor model-parallel groups.
    all_tp_group_ranks = []
    for i in range(num_tensor_model_parallel_groups):
        ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
        all_tp_group_ranks.append(list(ranks))

    print("TP", all_tp_group_ranks)

    return all_tp_group_ranks, all_pp_group_ranks, all_dp_group_ranks

    # # Build the pipeline model-parallel groups and embedding groups
    # # (first and last rank in each pipeline model-parallel group).
    # for i in range(num_pipeline_model_parallel_groups):
    #     ranks = range(i, world_size, num_pipeline_model_parallel_groups)
    #     print(f"EMB{i}", list(ranks))


def reshape(src, tgt):
    """
    reshape([tp_size_src, pp_size_src, dp_size_src],
            [tp_size_tgt, pp_size_tgt, dp_size_tgt])
    """

    print(f"\n\n*** Reshaping: {src} => {tgt}")

    tp_size_src, pp_size_src, dp_size_src = src
    tp_size_tgt, pp_size_tgt, dp_size_tgt = tgt

    tp_ranks1, pp_ranks1, dp_ranks1 = get_mpu_ranks(tp_size=tp_size_src, pp_size=pp_size_src, dp_size=dp_size_src)
    # Intermediate grid: target TP degree, but still the source PP/DP degrees.
    tp_ranks2, pp_ranks2, dp_ranks2 = get_mpu_ranks(tp_size=tp_size_tgt, pp_size=pp_size_src, dp_size=dp_size_src)
    tp_ranks3, pp_ranks3, dp_ranks3 = get_mpu_ranks(tp_size=tp_size_tgt, pp_size=pp_size_tgt, dp_size=dp_size_src)

    # handle tp contraction first
    print("\n*** TP contraction:")
    for i, r in enumerate(tp_ranks1):
        print(f'{tp_ranks1[i]} => {tp_ranks2[i]}')

    # handle pp contraction next
    print("\n*** PP contraction:")
    for i, r in enumerate(pp_ranks1):
        print(f'{pp_ranks2[i]} => {pp_ranks3[i]}')


# easy
#reshape([2,2,1],[1,1,1])

# probably need more logic to suggest how to pack
#reshape([4,4,1],[2,2,1])

#reshape([2,4,2], [8,32,1])

# get_mpu_ranks(2,2,2)
# get_mpu_ranks(4,2,1)
# get_mpu_ranks(2,4,1)
# get_mpu_ranks(1,1,8)
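
# Illustrative sketch, not part of the original module: the contraction printout for
# the "easy" case above, reshape([2,2,1], [1,1,1]):
#
#   *** TP contraction:
#   [0, 1] => [0]
#   [2, 3] => [1]
#
#   *** PP contraction:
#   [0, 1] => [0]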