# learner_thread.py
import copy
import threading
from typing import Dict, Optional

from six.moves import queue

from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.execution.minibatch_buffer import MinibatchBuffer
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.metrics.learner_info import LearnerInfoBuilder, \
    LEARNER_INFO, LEARNER_STATS_KEY
from ray.rllib.utils.metrics.window_stat import WindowStat
from ray.rllib.utils.timer import TimerStat
from ray.util.iter import _NextValueNotReady

# Lazily import TensorFlow; any/all of these may be None if TF is not
# installed (only needed when the configured framework is tf2/tfe).
tf1, tf, tfv = try_import_tf()
class LearnerThread(threading.Thread):
    """Background thread that updates the local model from sample trajectories.

    The learner thread communicates with the main thread through Queues. This
    is needed since Ray operations can only be run on the main thread. In
    addition, moving heavyweight gradient ops session runs off the main thread
    improves overall throughput.

    Producer/consumer contract: the driver puts train batches into
    ``self.inqueue``; this thread learns on them and puts
    ``(env_steps, stats)`` tuples into ``self.outqueue`` for the driver to
    consume.
    """

    def __init__(self, local_worker: RolloutWorker, minibatch_buffer_size: int,
                 num_sgd_iter: int, learner_queue_size: int,
                 learner_queue_timeout: int):
        """Initialize the learner thread.

        Args:
            local_worker (RolloutWorker): process local rollout worker holding
                policies this thread will call learn_on_batch() on
            minibatch_buffer_size (int): max number of train batches to store
                in the minibatching buffer
            num_sgd_iter (int): number of passes to learn on per train batch
            learner_queue_size (int): max size of queue of inbound
                train batches to this thread
            learner_queue_timeout (int): raise an exception if the queue has
                been empty for this long in seconds
        """
        threading.Thread.__init__(self)
        # Rolling window (last 50 samples) of the inbound queue occupancy,
        # reported via add_learner_metrics().
        self.learner_queue_size = WindowStat("size", 50)
        self.local_worker = local_worker
        # Bounded inbound queue of train batches; unbounded outbound queue of
        # (batch.count, learner_stats) result tuples.
        self.inqueue = queue.Queue(maxsize=learner_queue_size)
        self.outqueue = queue.Queue()
        # Buffer that re-serves each inbound batch num_sgd_iter times.
        self.minibatch_buffer = MinibatchBuffer(
            inqueue=self.inqueue,
            size=minibatch_buffer_size,
            timeout=learner_queue_timeout,
            num_passes=num_sgd_iter,
            init_num_passes=num_sgd_iter)
        # Timers for the metrics breakdown; load/load_wait appear to be
        # populated by subclasses or callers (nothing in this class writes
        # to them) — NOTE(review): confirm against multi-GPU variants.
        self.queue_timer = TimerStat()
        self.grad_timer = TimerStat()
        self.load_timer = TimerStat()
        self.load_wait_timer = TimerStat()
        # Daemon thread: do not block interpreter shutdown.
        self.daemon = True
        # Flag read by the driver to know fresh weights are available.
        self.weights_updated = False
        # Most recent finalized results dict from learn_on_batch() calls.
        self.learner_info = {}
        # Cooperative stop flag checked by run(); set externally to stop.
        self.stopped = False
        self.num_steps = 0

    def run(self) -> None:
        """Thread entry point: loop over step() until stopped externally."""
        # Switch on eager mode if configured. Must happen inside this thread,
        # since TF eager state is set up per-thread.
        if self.local_worker.policy_config.get("framework") in ["tf2", "tfe"]:
            tf1.enable_eager_execution()
        while not self.stopped:
            self.step()

    def step(self) -> Optional[_NextValueNotReady]:
        """Run one learning iteration on the next available train batch.

        Returns:
            Optional[_NextValueNotReady]: a _NextValueNotReady sentinel if no
                batch arrived within the queue timeout, else None.
        """
        with self.queue_timer:
            try:
                batch, _ = self.minibatch_buffer.get()
            except queue.Empty:
                # No batch available yet; signal the caller to retry rather
                # than blocking forever.
                return _NextValueNotReady()

        with self.grad_timer:
            # Use LearnerInfoBuilder as a unified way to build the final
            # results dict from `learn_on_loaded_batch` call(s).
            # This makes sure results dicts always have the same structure
            # no matter the setup (multi-GPU, multi-agent, minibatch SGD,
            # tf vs torch).
            learner_info_builder = LearnerInfoBuilder(num_devices=1)
            multi_agent_results = self.local_worker.learn_on_batch(batch)
            for pid, results in multi_agent_results.items():
                learner_info_builder.add_learn_on_batch_results(results, pid)
            self.learner_info = learner_info_builder.finalize()
            # Per-policy stats sub-dicts only, keyed by policy id.
            learner_stats = {
                pid: info[LEARNER_STATS_KEY]
                for pid, info in self.learner_info.items()
            }
            # Let the driver know it should broadcast new weights.
            self.weights_updated = True

        self.num_steps += 1
        # Publish (steps trained, stats) for the driver thread to consume;
        # batch.count is presumably the env-step count of the batch — defined
        # by the SampleBatch type, not visible here.
        self.outqueue.put((batch.count, learner_stats))
        self.learner_queue_size.push(self.inqueue.qsize())

    def add_learner_metrics(self, result: Dict) -> Dict:
        """Add internal metrics to a trainer result dict.

        Mutates ``result["info"]`` in place and also returns ``result``.
        """

        def timer_to_ms(timer):
            # Mean timer value (seconds) -> milliseconds, 3 decimals.
            return round(1000 * timer.mean, 3)

        result["info"].update({
            "learner_queue": self.learner_queue_size.stats(),
            # Deep-copy so later learn steps don't mutate reported results.
            LEARNER_INFO: copy.deepcopy(self.learner_info),
            "timing_breakdown": {
                "learner_grad_time_ms": timer_to_ms(self.grad_timer),
                "learner_load_time_ms": timer_to_ms(self.load_timer),
                "learner_load_wait_time_ms": timer_to_ms(self.load_wait_timer),
                "learner_dequeue_time_ms": timer_to_ms(self.queue_timer),
            }
        })
        return result