schedule.py
from ..utils import call_to_str
from abc import ABC, abstractmethod


class PipeSchedule(ABC):
    """Directs the execution of a pipeline engine by generating sequences of
    :class:`PipeInstruction`.

    Schedules are generators that yield sequences of
    :class:`PipeInstruction` to process the micro-batches in one batch.
    Each yielded step is atomic in the sense that a barrier
    synchronization can be placed between successive steps without
    deadlock.

    Below is an example schedule that implements data parallelism with gradient accumulation:

    .. code-block:: python

        class DataParallelSchedule(PipeSchedule):
            def steps(self):
                for step_id in range(self.micro_batches):
                    cmds = [
                        LoadMicroBatch(buffer_id=0),
                        ForwardPass(buffer_id=0),
                        BackwardPass(buffer_id=0),
                    ]
                    if step_id == self.micro_batches - 1:
                        cmds.extend([
                            ReduceGrads(),
                            OptimizerStep(),
                        ])
                    yield cmds

            def num_pipe_buffers(self):
                return 1
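
    A schedule object is itself iterable; the pipeline engine walks it and
    executes each yielded list of instructions as one step. A minimal usage
    sketch of the example above (the constructor arguments are just
    illustrative values):

    .. code-block:: python

        sched = DataParallelSchedule(micro_batches=4, stages=1, stage_id=0)
        for cmds in sched:
            print(cmds)  # one list of PipeInstruction objects per step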

    Args:
        micro_batches (int): The number of micro-batches that comprise a batch.
        stages (int): The number of pipeline stages.
        stage_id (int): The pipe stage that will execute the generated schedule.
    """

    def __init__(self, micro_batches, stages, stage_id):
        super().__init__()
        self.micro_batches = micro_batches
        self.stages = stages
        self.stage_id = stage_id
        self.prev_stage = self.stage_id - 1
        self.next_stage = self.stage_id + 1

    @abstractmethod
    def steps(self):
        """Yield a list of :class:`PipeInstruction` for each step in the schedule.

        .. note::
            Schedules must implement ``steps()`` to define the schedule.

        Returns:
            Instructions to be executed as one step of the pipeline
        """
        pass

    def num_pipe_buffers(self):
        """The number of pipeline buffers that will be used by this stage.

        .. note::
            Schedules should specialize ``num_pipe_buffers()`` for memory savings at scale.

        Returns:
            The number of buffers for the engine to allocate.
        """
        return self.micro_batches

    def _valid_micro_batch(self, micro_batch_id):
        return 0 <= micro_batch_id < self.micro_batches

    def _valid_stage(self, stage_id):
        return 0 <= stage_id < self.stages

    @property
    def stage(self):
        """Stage index used to configure this schedule."""
        return self.stage_id

    @property
    def num_stages(self):
        """The number of total pipeline stages used to configure this schedule."""
        return self.stages

    @property
    def num_micro_batches(self):
        """The number of total micro_batches used to configure this schedule."""
        return self.micro_batches

    @property
    def is_first_stage(self):
        """True if the configured ``stage_id`` is the first stage in the pipeline."""
        return self.stage_id == 0

    @property
    def is_last_stage(self):
        """True if the configured ``stage_id`` is the last stage in the pipeline."""
        return self.stage_id == self.stages - 1

    def _buffer_idx(self, micro_batch_id):
        """Map a micro-batch index to a pipeline buffer index.

        This method uses a cyclic allocation strategy.

        Args:
            micro_batch_id (int): The micro-batch index relative to the beginning of the schedule.

        Returns:
            int: The index of the buffer that should store data.
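
        For example, with two pipeline buffers the allocation cycles through
        them (a minimal illustration):

        .. code-block:: python

            # micro-batches 0, 1, 2, 3 map to buffers 0, 1, 0, 1
            buffer_id = micro_batch_id % 2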
  89. """
  90. assert self._valid_micro_batch(micro_batch_id)
  91. return micro_batch_id % self.num_pipe_buffers()
  92. def __iter__(self):
  93. self.it = None
  94. return self
  95. def __next__(self):
  96. if self.it is None:
  97. self.it = self.steps()
  98. return next(self.it)
  99. class InferenceSchedule(PipeSchedule):
  100. """A schedule for inferencing batches using pipeline parallelism.
  101. """
  102. def steps(self):
  103. """"""
  104. prev_micro_batch_id = -1
  105. total_steps = self.micro_batches + self.stages - 1
  106. for step_id in range(total_steps):
  107. cmds = []
  108. micro_batch_id = step_id - self.stage_id
  109. # Alternate send/recv buffers
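            # Ping-pong between two buffers so that the activation sent this step
            # (produced in the previous step) is not overwritten by the activation
            # received this step. Even and odd stages alternate in opposite phase.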
            if _is_even(self.stage_id):
                recv_buf = step_id % 2
                send_buf = (step_id + 1) % 2
            else:
                recv_buf = (step_id + 1) % 2
                send_buf = step_id % 2

            if self.is_first_stage or self.is_last_stage:
                if self._valid_micro_batch(micro_batch_id):
                    cmds.append(LoadMicroBatch(recv_buf))

            if _is_even(self.stage_id):
                if self._valid_stage(self.next_stage):
                    if self._valid_micro_batch(micro_batch_id - 1):
                        cmds.append(SendActivation(send_buf))
                if self._valid_stage(self.prev_stage):
                    if self._valid_micro_batch(micro_batch_id):
                        cmds.append(RecvActivation(recv_buf))
            else:
                if self._valid_stage(self.prev_stage):
                    if self._valid_micro_batch(micro_batch_id):
                        cmds.append(RecvActivation(recv_buf))
                if self._valid_stage(self.next_stage):
                    if self._valid_micro_batch(micro_batch_id - 1):
                        cmds.append(SendActivation(send_buf))

            if self._valid_micro_batch(micro_batch_id):
                cmds.append(ForwardPass(recv_buf))

            yield cmds

    def num_pipe_buffers(self):
        """Only two pipeline buffers are required for inferencing.

        Returns:
            ``2``
        """
        return 2


class TrainSchedule(PipeSchedule):
    """A schedule for training a batch using hybrid parallelism.

    Pipeline parallelism is extracted through gradient accumulation and thus
    convergence follows that of a data parallel approach with the same batch
    size.
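
    A minimal sketch of inspecting the generated schedule for one stage
    (the constructor arguments are just illustrative values):

    .. code-block:: python

        sched = TrainSchedule(micro_batches=4, stages=2, stage_id=0)
        for step_id, cmds in enumerate(sched):
            print(step_id, cmds)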
  147. """
  148. def steps(self):
  149. """"""
  150. prev_micro_batch_id = -1
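        # Two steps (one forward, one backward) per micro-batch, plus
        # 2 * (stages - 1) steps to fill and drain the pipeline.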
        total_steps = 2 * (self.micro_batches + self.stages - 1)
        for step_id in range(total_steps):
            # Map the step of the pipeline to the micro-batch id and also whether it is a
            # forward or backward pass step.
            micro_batch_id, is_forward = self._step_to_micro_batch(step_id)

            if self._valid_micro_batch(prev_micro_batch_id):
                prev_buffer = self._buffer_idx(prev_micro_batch_id)
            if self._valid_micro_batch(micro_batch_id):
                curr_buffer = self._buffer_idx(micro_batch_id)

            cmds = []

            # Exchange activations
            if is_forward:
                if self._valid_micro_batch(micro_batch_id) and self._valid_stage(
                        self.prev_stage):
                    cmds.append(RecvActivation(curr_buffer))
                if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(
                        self.prev_stage):
                    cmds.append(SendGrad(prev_buffer))
            else:
                if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(
                        self.next_stage):
                    cmds.append(SendActivation(prev_buffer))
                if self._valid_micro_batch(micro_batch_id) and self._valid_stage(
                        self.next_stage):
                    cmds.append(RecvGrad(curr_buffer))

            # First/last stage loads
            if self.stage_id == 0 or self.stage_id == self.stages - 1:
                if is_forward and self._valid_micro_batch(micro_batch_id):
                    cmds.append(LoadMicroBatch(curr_buffer))

            # Computation
            if self._valid_micro_batch(micro_batch_id):
                if is_forward:
                    cmds.append(ForwardPass(curr_buffer))
                else:
                    cmds.append(BackwardPass(curr_buffer))

            # Model step at the end of the batch
            if step_id == total_steps - 1:
                cmds.append(ReduceTiedGrads())
                cmds.append(ReduceGrads())
                cmds.append(OptimizerStep())

            # Prepare state for next time
            prev_micro_batch_id = micro_batch_id

            yield cmds

    def num_pipe_buffers(self):
        """As many buffers as the distance from this stage to the last stage.
        """
        buffers = min(self.stages - self.stage_id + 1, self.micro_batches)
        return max(2, buffers)
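
    # In this schedule, even-indexed stages run forward passes on even step ids
    # and backward passes on odd step ids; odd-indexed stages do the opposite.
    # The helpers below recover the micro-batch id from the step id under that
    # parity convention.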

    def _step_to_micro_batch(self, step_id):
        if _is_even(step_id) and _is_even(self.stage_id):
            micro_batch_id = self._even_step_forward_id(step_id)
            is_forward = True

        elif _is_odd(step_id) and _is_odd(self.stage_id):
            micro_batch_id = self._odd_step_forward_id(step_id)
            is_forward = True

        elif _is_even(step_id) and _is_odd(self.stage_id):
            micro_batch_id = self._even_step_backward_id(step_id)
            is_forward = False

        elif _is_odd(step_id) and _is_even(self.stage_id):
            micro_batch_id = self._odd_step_backward_id(step_id)
            is_forward = False

        else:
            assert False

        return micro_batch_id, is_forward

    def _even_step_forward_id(self, step_id):
        base = step_id // 2
        micro_batch_id = int(base - self.stage_id // 2)
        return micro_batch_id

    def _odd_step_forward_id(self, step_id):
        base = (step_id - 1) // 2
        micro_batch_id = int(base - self.stage_id // 2)
        return micro_batch_id

    def _even_step_backward_id(self, step_id):
        base = step_id // 2
        micro_batch_id = int(base - self.stages + (self.stage_id + 1) // 2)
        return micro_batch_id

    def _odd_step_backward_id(self, step_id):
        base = ((step_id - 1) // 2) - self.stages + 1
        micro_batch_id = int(base + self.stage_id // 2)
        return micro_batch_id


class DataParallelSchedule(PipeSchedule):
    """An example schedule that trains using traditional data parallelism with gradient
    accumulation.
    """

    def steps(self):
        """"""
        for step_id in range(self.micro_batches):
            cmds = [
                LoadMicroBatch(buffer_id=0),
                ForwardPass(buffer_id=0),
                BackwardPass(buffer_id=0),
            ]
            if step_id == self.micro_batches - 1:
                cmds.extend([
                    ReduceGrads(),
                    OptimizerStep(),
                ])
            yield cmds

    def num_pipe_buffers(self):
        """Only one pipeline buffer needed.
        """
        return 1


class PipeInstruction:
    """Base class for all instructions to be executed by the pipeline engine.

    All keyword arguments are stored as members similar to a ``namedtuple``. These are
    then accessible to the :class:`PipeEngine` during execution.

    Args:
        kwargs (optional): keyword arguments to store as members
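
    A minimal sketch of how instructions behave (illustrative only):

    .. code-block:: python

        cmd = LoadMicroBatch(buffer_id=0)
        cmd.buffer_id  # keyword arguments become attributes -> 0
        cmd.kwargs     # {'buffer_id': 0}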
  259. """
  260. def __init__(self, **kwargs):
  261. self.name = self.__class__.__name__
  262. self.kwargs = kwargs
  263. for key, val in kwargs.items():
  264. setattr(self, key, val)
  265. def __repr__(self):
  266. return call_to_str(self.name, **self.kwargs)
  267. class OptimizerStep(PipeInstruction):
  268. """Performs one step with the optimizer and zeros gradients.
  269. .. note:: Should be issued after :class:`ReduceGrads` and :class:`ReduceTiedGrads`.
  270. .. note:: Can be a synchronization point among data-parallel ranks.
  271. """
  272. pass
  273. class ReduceGrads(PipeInstruction):
  274. """Reduce the computed gradients among data-parallel processes within the stage.
  275. """
  276. pass
  277. class ReduceTiedGrads(PipeInstruction):
  278. """Reduce the computed gradients of tied modules within a pipeline-parallel group.
  279. .. warning::
  280. The stages included in this synchronization point are not known until
  281. the model is partitioned among pipeline stages. In the worst case, it
  282. includes all pipeline stages. This instruction should be scheduled
  283. carefully to avoid deadlocks.
  284. """
  285. pass


class BufferOpInstruction(PipeInstruction):
    """A pipeline instruction that operates on pipeline buffer(s).

    Args:
        buffer_id (int): the index of the pipeline buffer(s) to modify.
    """

    def __init__(self, buffer_id, **kwargs):
        super().__init__(buffer_id=buffer_id, **kwargs)


# IO
class LoadMicroBatch(BufferOpInstruction):
    """Load a micro-batch into a buffer.

    Roughly:

    .. code-block:: python

        buffers['inputs'][buffer_id] = next(data_iter)
    """
    pass


# Compute
class ForwardPass(BufferOpInstruction):
    """Compute a forward pass.

    Roughly:

    .. code-block:: python

        buffers['outputs'][buffer_id] = forward(buffers['inputs'][buffer_id])
    """
    pass


class BackwardPass(BufferOpInstruction):
    """Compute a backward pass and accumulate gradients.

    Roughly:

    .. code-block:: python

        outputs = buffers['outputs'][buffer_id]
        gradients = buffers['gradients'][buffer_id]
        torch.autograd.backward(tensors=outputs,
                                grad_tensors=gradients)
    """
    pass


# Communication
class SendActivation(BufferOpInstruction):
    """Send activations to the next stage in the pipeline.

    Roughly:

    .. code-block:: python

        send(buffers['outputs'][buffer_id])

    .. note::
        The communication is blocking and must be paired with a :class:`RecvActivation`
        on the next pipeline stage to avoid deadlock.
    """
    pass


class RecvActivation(BufferOpInstruction):
    """Receive activations from the previous stage in the pipeline.

    Roughly:

    .. code-block:: python

        buffers['inputs'][buffer_id] = recv()

    .. note::
        The communication is blocking and must be paired with a :class:`SendActivation`
        on the previous pipeline stage to avoid deadlock.
    """
    pass


class SendGrad(BufferOpInstruction):
    """Send computed gradients to the previous pipeline stage with respect to
    the received activations.

    .. note::
        Only received tensors with ``requires_grad==True`` will produce gradients.
        Missing gradients will be replaced with ``None`` on the receiving stage.

    .. note::
        The communication is blocking and must be paired with a :class:`RecvGrad`
        on the previous pipeline stage to avoid deadlock.
    """
    pass


class RecvGrad(BufferOpInstruction):
    """Receive computed gradients from the next pipeline stage.

    .. note::
        Only activations with ``requires_grad==True`` will produce gradients.
        Missing gradients will be replaced with ``None``.

    .. note::
        The communication is blocking and must be paired with a :class:`SendGrad`
        on the next pipeline stage to avoid deadlock.
    """
    pass


def _is_even(x):
    return x % 2 == 0


def _is_odd(x):
    return x % 2 != 0