multi_agent_env.py 6.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201
  1. import gym
  2. from typing import Callable, Dict, List, Tuple, Type, Union
  3. from ray.rllib.env.env_context import EnvContext
  4. from ray.rllib.utils.annotations import override, PublicAPI
  5. from ray.rllib.utils.typing import AgentID, EnvType, MultiAgentDict
# If the obs space is Dict type, look for the global state under this key.
# (Consumers elsewhere in RLlib read this; presumably grouped/centralized-critic
# setups — confirm against the wrappers that import ENV_STATE.)
ENV_STATE = "state"
  8. @PublicAPI
  9. class MultiAgentEnv(gym.Env):
  10. """An environment that hosts multiple independent agents.
  11. Agents are identified by (string) agent ids. Note that these "agents" here
  12. are not to be confused with RLlib agents.
  13. Examples:
  14. >>> env = MyMultiAgentEnv()
  15. >>> obs = env.reset()
  16. >>> print(obs)
  17. {
  18. "car_0": [2.4, 1.6],
  19. "car_1": [3.4, -3.2],
  20. "traffic_light_1": [0, 3, 5, 1],
  21. }
  22. >>> obs, rewards, dones, infos = env.step(
  23. ... action_dict={
  24. ... "car_0": 1, "car_1": 0, "traffic_light_1": 2,
  25. ... })
  26. >>> print(rewards)
  27. {
  28. "car_0": 3,
  29. "car_1": -1,
  30. "traffic_light_1": 0,
  31. }
  32. >>> print(dones)
  33. {
  34. "car_0": False, # car_0 is still running
  35. "car_1": True, # car_1 is done
  36. "__all__": False, # the env is not done
  37. }
  38. >>> print(infos)
  39. {
  40. "car_0": {}, # info for car_0
  41. "car_1": {}, # info for car_1
  42. }
  43. """
  44. @PublicAPI
  45. def reset(self) -> MultiAgentDict:
  46. """Resets the env and returns observations from ready agents.
  47. Returns:
  48. New observations for each ready agent.
  49. """
  50. raise NotImplementedError
  51. @PublicAPI
  52. def step(
  53. self, action_dict: MultiAgentDict
  54. ) -> Tuple[MultiAgentDict, MultiAgentDict, MultiAgentDict, MultiAgentDict]:
  55. """Returns observations from ready agents.
  56. The returns are dicts mapping from agent_id strings to values. The
  57. number of agents in the env can vary over time.
  58. Returns:
  59. Tuple containing 1) new observations for
  60. each ready agent, 2) reward values for each ready agent. If
  61. the episode is just started, the value will be None.
  62. 3) Done values for each ready agent. The special key
  63. "__all__" (required) is used to indicate env termination.
  64. 4) Optional info values for each agent id.
  65. """
  66. raise NotImplementedError
  67. @PublicAPI
  68. def render(self, mode=None) -> None:
  69. """Tries to render the environment."""
  70. # By default, do nothing.
  71. pass
  72. # yapf: disable
  73. # __grouping_doc_begin__
  74. @PublicAPI
  75. def with_agent_groups(
  76. self,
  77. groups: Dict[str, List[AgentID]],
  78. obs_space: gym.Space = None,
  79. act_space: gym.Space = None) -> "MultiAgentEnv":
  80. """Convenience method for grouping together agents in this env.
  81. An agent group is a list of agent ids that are mapped to a single
  82. logical agent. All agents of the group must act at the same time in the
  83. environment. The grouped agent exposes Tuple action and observation
  84. spaces that are the concatenated action and obs spaces of the
  85. individual agents.
  86. The rewards of all the agents in a group are summed. The individual
  87. agent rewards are available under the "individual_rewards" key of the
  88. group info return.
  89. Agent grouping is required to leverage algorithms such as Q-Mix.
  90. This API is experimental.
  91. Args:
  92. groups: Mapping from group id to a list of the agent ids
  93. of group members. If an agent id is not present in any group
  94. value, it will be left ungrouped.
  95. obs_space: Optional observation space for the grouped
  96. env. Must be a tuple space.
  97. act_space: Optional action space for the grouped env.
  98. Must be a tuple space.
  99. Examples:
  100. >>> env = YourMultiAgentEnv(...)
  101. >>> grouped_env = env.with_agent_groups(env, {
  102. ... "group1": ["agent1", "agent2", "agent3"],
  103. ... "group2": ["agent4", "agent5"],
  104. ... })
  105. """
  106. from ray.rllib.env.wrappers.group_agents_wrapper import \
  107. GroupAgentsWrapper
  108. return GroupAgentsWrapper(self, groups, obs_space, act_space)
  109. # __grouping_doc_end__
  110. # yapf: enable
  111. def make_multi_agent(
  112. env_name_or_creator: Union[str, Callable[[EnvContext], EnvType]],
  113. ) -> Type["MultiAgentEnv"]:
  114. """Convenience wrapper for any single-agent env to be converted into MA.
  115. Agent IDs are int numbers starting from 0 (first agent).
  116. Args:
  117. env_name_or_creator: String specifier or env_maker function taking
  118. an EnvContext object as only arg and returning a gym.Env.
  119. Returns:
  120. New MultiAgentEnv class to be used as env.
  121. The constructor takes a config dict with `num_agents` key
  122. (default=1). The rest of the config dict will be passed on to the
  123. underlying single-agent env's constructor.
  124. Examples:
  125. >>> # By gym string:
  126. >>> ma_cartpole_cls = make_multi_agent("CartPole-v0")
  127. >>> # Create a 2 agent multi-agent cartpole.
  128. >>> ma_cartpole = ma_cartpole_cls({"num_agents": 2})
  129. >>> obs = ma_cartpole.reset()
  130. >>> print(obs)
  131. ... {0: [...], 1: [...]}
  132. >>> # By env-maker callable:
  133. >>> ma_stateless_cartpole_cls = make_multi_agent(
  134. ... lambda config: StatelessCartPole(config))
  135. >>> # Create a 2 agent multi-agent stateless cartpole.
  136. >>> ma_stateless_cartpole = ma_stateless_cartpole_cls(
  137. ... {"num_agents": 2})
  138. """
  139. class MultiEnv(MultiAgentEnv):
  140. def __init__(self, config=None):
  141. config = config or {}
  142. num = config.pop("num_agents", 1)
  143. if isinstance(env_name_or_creator, str):
  144. self.agents = [
  145. gym.make(env_name_or_creator) for _ in range(num)
  146. ]
  147. else:
  148. self.agents = [env_name_or_creator(config) for _ in range(num)]
  149. self.dones = set()
  150. self.observation_space = self.agents[0].observation_space
  151. self.action_space = self.agents[0].action_space
  152. @override(MultiAgentEnv)
  153. def reset(self):
  154. self.dones = set()
  155. return {i: a.reset() for i, a in enumerate(self.agents)}
  156. @override(MultiAgentEnv)
  157. def step(self, action_dict):
  158. obs, rew, done, info = {}, {}, {}, {}
  159. for i, action in action_dict.items():
  160. obs[i], rew[i], done[i], info[i] = self.agents[i].step(action)
  161. if done[i]:
  162. self.dones.add(i)
  163. done["__all__"] = len(self.dones) == len(self.agents)
  164. return obs, rew, done, info
  165. @override(MultiAgentEnv)
  166. def render(self, mode=None):
  167. return self.agents[0].render(mode)
  168. return MultiEnv