import collections
import logging
from typing import Any, Dict, List, Optional, TYPE_CHECKING

import numpy as np

from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.metrics.learner_info import LEARNER_STATS_KEY
from ray.rllib.utils.typing import GradInfoDict, LearnerStatsDict, ResultDict

if TYPE_CHECKING:
    from ray.rllib.evaluation.worker_set import WorkerSet

logger = logging.getLogger(__name__)

RolloutMetrics = DeveloperAPI(
    collections.namedtuple(
        "RolloutMetrics",
        [
            "episode_length",
            "episode_reward",
            "agent_rewards",
            "custom_metrics",
            "perf_stats",
            "hist_data",
            "media",
            "episode_faulty",
            "connector_metrics",
        ],
    )
)
RolloutMetrics.__new__.__defaults__ = (0, 0, {}, {}, {}, {}, {}, False, {})
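

# Illustrative sketch (added for clarity, not part of the original module):
# RolloutMetrics is a plain namedtuple, so a completed episode can be recorded
# with keyword arguments, and any field left out falls back to the defaults
# set above.
def _example_rollout_metrics() -> None:
    m = RolloutMetrics(episode_length=200, episode_reward=13.5)
    # Unspecified fields take their defaults.
    assert m.episode_faulty is False
    assert m.custom_metrics == {}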


def _extract_stats(stats: Dict, key: str) -> Dict[str, Any]:
    """Returns `stats[key]` if present, else collects `key` per policy ID."""
    if key in stats:
        return stats[key]

    multiagent_stats = {}
    for k, v in stats.items():
        if isinstance(v, dict):
            if key in v:
                multiagent_stats[k] = v[key]

    return multiagent_stats
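

# Illustrative sketch (added, not in the original module): how _extract_stats
# behaves for flat vs. multi-agent stats dicts. The "policy_1"/"policy_2" keys
# below are hypothetical policy IDs chosen for the example.
def _example_extract_stats() -> None:
    # Flat case: the key is present at the top level.
    flat = {"learner_stats": {"vf_loss": 0.1}}
    assert _extract_stats(flat, "learner_stats") == {"vf_loss": 0.1}
    # Multi-agent case: the key is nested one level down, per policy ID.
    nested = {
        "policy_1": {"learner_stats": {"vf_loss": 0.1}},
        "policy_2": {"learner_stats": {"vf_loss": 0.2}},
    }
    assert _extract_stats(nested, "learner_stats") == {
        "policy_1": {"vf_loss": 0.1},
        "policy_2": {"vf_loss": 0.2},
    }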


@DeveloperAPI
def get_learner_stats(grad_info: GradInfoDict) -> LearnerStatsDict:
    """Returns optimization stats reported from the policy.

    Example:
        >>> grad_info = worker.learn_on_batch(samples)
        >>> print(grad_info)
        {"td_error": [...], "learner_stats": {"vf_loss": ..., ...}}
        >>> print(get_learner_stats(grad_info))
        {"vf_loss": ..., "policy_loss": ...}
    """
    if LEARNER_STATS_KEY in grad_info:
        return grad_info[LEARNER_STATS_KEY]

    multiagent_stats = {}
    for k, v in grad_info.items():
        if isinstance(v, dict):
            if LEARNER_STATS_KEY in v:
                multiagent_stats[k] = v[LEARNER_STATS_KEY]

    return multiagent_stats
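

# Illustrative sketch (added, not in the original module): get_learner_stats
# on hand-built single- and multi-agent grad-info dicts. "policy_1" is a
# hypothetical policy ID.
def _example_get_learner_stats() -> None:
    # Single-agent: stats sit directly under LEARNER_STATS_KEY.
    single = {LEARNER_STATS_KEY: {"vf_loss": 0.2}, "td_error": [0.1, -0.3]}
    assert get_learner_stats(single) == {"vf_loss": 0.2}
    # Multi-agent: one nested stats dict per policy ID.
    multi = {"policy_1": {LEARNER_STATS_KEY: {"vf_loss": 0.3}}}
    assert get_learner_stats(multi) == {"policy_1": {"vf_loss": 0.3}}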


@DeveloperAPI
def collect_metrics(
    workers: "WorkerSet",
    remote_worker_ids: Optional[List[int]] = None,
    timeout_seconds: int = 180,
    keep_custom_metrics: bool = False,
) -> ResultDict:
    """Gathers episode metrics from a rollout worker set.

    Args:
        workers: WorkerSet to collect metrics from.
        remote_worker_ids: Optional list of IDs of remote workers to collect
            metrics from.
        timeout_seconds: Timeout in seconds for collecting metrics from
            remote workers.
        keep_custom_metrics: Whether to keep custom metrics in the result
            dict as they are (True) or to aggregate them (False).

    Returns:
        A result dict of metrics.
    """
    episodes = collect_episodes(
        workers, remote_worker_ids, timeout_seconds=timeout_seconds
    )
    # Everything collected here is new this iteration, so `episodes` is
    # passed as both `episodes` and `new_episodes`.
    metrics = summarize_episodes(
        episodes, episodes, keep_custom_metrics=keep_custom_metrics
    )
    return metrics


@DeveloperAPI
def collect_episodes(
    workers: "WorkerSet",
    remote_worker_ids: Optional[List[int]] = None,
    timeout_seconds: int = 180,
) -> List[RolloutMetrics]:
    """Gathers new episode metrics tuples from the given RolloutWorkers.

    Args:
        workers: WorkerSet to collect metrics from.
        remote_worker_ids: Optional list of IDs of remote workers to collect
            metrics from.
        timeout_seconds: Timeout in seconds for collecting metrics from
            remote workers.

    Returns:
        List of RolloutMetrics.
    """
    # get_metrics() calls that exceed `timeout_seconds` are dropped here.
    # We can potentially make this an asynchronous call if this turns
    # out to be a problem.
    metric_lists = workers.foreach_worker(
        lambda w: w.get_metrics(),
        local_worker=True,
        remote_worker_ids=remote_worker_ids,
        timeout_seconds=timeout_seconds,
    )
    if len(metric_lists) == 0:
        logger.warning("WARNING: collected no metrics.")

    episodes = []
    for metrics in metric_lists:
        episodes.extend(metrics)

    return episodes
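

# Usage sketch (hedged): the typical pairing of collect_episodes and
# summarize_episodes. This assumes an already-built WorkerSet, e.g. the
# `workers` attribute of an RLlib Algorithm, so it is not runnable standalone:
#
#     episodes = collect_episodes(algo.workers, timeout_seconds=60)
#     result = summarize_episodes(episodes)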


@DeveloperAPI
def summarize_episodes(
    episodes: List[RolloutMetrics],
    new_episodes: Optional[List[RolloutMetrics]] = None,
    keep_custom_metrics: bool = False,
) -> ResultDict:
    """Summarizes a set of episode metrics tuples.

    Args:
        episodes: List of the most recent n episodes. This may include
            historical ones (not newly collected in this iteration) in order
            to achieve the size of the smoothing window.
        new_episodes: All episodes that were completed in this iteration.
        keep_custom_metrics: Whether to keep custom metrics in the result
            dict as they are (True) or to aggregate them (False).

    Returns:
        A result dict of metrics.
    """
    if new_episodes is None:
        new_episodes = episodes

    episode_rewards = []
    episode_lengths = []
    policy_rewards = collections.defaultdict(list)
    custom_metrics = collections.defaultdict(list)
    perf_stats = collections.defaultdict(list)
    hist_stats = collections.defaultdict(list)
    episode_media = collections.defaultdict(list)
    connector_metrics = collections.defaultdict(list)
    num_faulty_episodes = 0

    for episode in episodes:
        # Faulty episodes may still carry perf_stats data.
        for k, v in episode.perf_stats.items():
            perf_stats[k].append(v)

        # Continue if this is a faulty episode.
        # There should be other meaningful stats to be collected.
        if episode.episode_faulty:
            num_faulty_episodes += 1
            continue

        episode_lengths.append(episode.episode_length)
        episode_rewards.append(episode.episode_reward)
        for k, v in episode.custom_metrics.items():
            custom_metrics[k].append(v)
        for (_, policy_id), reward in episode.agent_rewards.items():
            if policy_id != DEFAULT_POLICY_ID:
                policy_rewards[policy_id].append(reward)
        for k, v in episode.hist_data.items():
            hist_stats[k] += v
        for k, v in episode.media.items():
            episode_media[k].append(v)
        if hasattr(episode, "connector_metrics"):
            # Group connector metrics by metric name across all policies.
            for per_pipeline_metrics in episode.connector_metrics.values():
                for per_connector_metrics in per_pipeline_metrics.values():
                    for name, val in per_connector_metrics.items():
                        connector_metrics[name].append(val)

    if episode_rewards:
        min_reward = min(episode_rewards)
        max_reward = max(episode_rewards)
        avg_reward = np.mean(episode_rewards)
    else:
        min_reward = float("nan")
        max_reward = float("nan")
        avg_reward = float("nan")
    if episode_lengths:
        avg_length = np.mean(episode_lengths)
    else:
        avg_length = float("nan")

    # Show as histogram distributions.
    hist_stats["episode_reward"] = episode_rewards
    hist_stats["episode_lengths"] = episode_lengths

    policy_reward_min = {}
    policy_reward_mean = {}
    policy_reward_max = {}
    for policy_id, rewards in policy_rewards.copy().items():
        policy_reward_min[policy_id] = np.min(rewards)
        policy_reward_mean[policy_id] = np.mean(rewards)
        policy_reward_max[policy_id] = np.max(rewards)
        # Show as histogram distributions.
        hist_stats["policy_{}_reward".format(policy_id)] = rewards

    for k, v_list in custom_metrics.copy().items():
        # Filter out NaN values before aggregating.
        filt = [v for v in v_list if not np.any(np.isnan(v))]
        if keep_custom_metrics:
            custom_metrics[k] = filt
        else:
            if filt:
                custom_metrics[k + "_mean"] = np.mean(filt)
                custom_metrics[k + "_min"] = np.min(filt)
                custom_metrics[k + "_max"] = np.max(filt)
            else:
                # np.mean([]) would emit a RuntimeWarning; report NaN instead.
                custom_metrics[k + "_mean"] = float("nan")
                custom_metrics[k + "_min"] = float("nan")
                custom_metrics[k + "_max"] = float("nan")
            del custom_metrics[k]

    for k, v_list in perf_stats.copy().items():
        perf_stats[k] = np.mean(v_list)

    mean_connector_metrics = dict()
    for k, v_list in connector_metrics.items():
        mean_connector_metrics[k] = np.mean(v_list)

    return dict(
        episode_reward_max=max_reward,
        episode_reward_min=min_reward,
        episode_reward_mean=avg_reward,
        episode_len_mean=avg_length,
        episode_media=dict(episode_media),
        episodes_this_iter=len(new_episodes),
        policy_reward_min=policy_reward_min,
        policy_reward_max=policy_reward_max,
        policy_reward_mean=policy_reward_mean,
        custom_metrics=dict(custom_metrics),
        hist_stats=dict(hist_stats),
        sampler_perf=dict(perf_stats),
        num_faulty_episodes=num_faulty_episodes,
        connector_metrics=mean_connector_metrics,
    )
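

# Illustrative sketch (added, not in the original module): summarizing two
# hand-built episodes end-to-end, without any workers involved.
def _example_summarize_episodes() -> None:
    eps = [
        RolloutMetrics(episode_length=10, episode_reward=1.0),
        RolloutMetrics(episode_length=20, episode_reward=3.0),
    ]
    result = summarize_episodes(eps)
    assert result["episode_reward_mean"] == 2.0
    assert result["episode_len_mean"] == 15.0
    assert result["episodes_this_iter"] == 2
    # Faulty episodes are counted but excluded from reward/length stats.
    eps.append(RolloutMetrics(episode_faulty=True))
    assert summarize_episodes(eps)["num_faulty_episodes"] == 1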