recsim_wrapper.py

  1. """Wrap Google's RecSim environment for RLlib
  2. RecSim is a configurable recommender systems simulation platform.
  3. Source: https://github.com/google-research/recsim
  4. """
  5. from collections import OrderedDict
  6. import gym
  7. from gym import spaces
  8. import numpy as np
  9. from recsim.environments import interest_evolution
  10. from typing import List
  11. from ray.rllib.utils.error import UnsupportedSpaceException
  12. from ray.tune.registry import register_env
  13. from ray.rllib.utils.spaces.space_utils import convert_element_to_space_type


class RecSimObservationSpaceWrapper(gym.ObservationWrapper):
    """Fix RecSim environment's observation space.

    In RecSim's observation spaces, the "doc" field is a dictionary keyed by
    document IDs. Those IDs change every step, yielding a different
    observation space each time. This causes issues for RLlib because it
    expects the observation space to remain the same across steps.

    This environment wrapper fixes that by reindexing the documents by their
    positions in the list.
    """

    def __init__(self, env: gym.Env):
        super().__init__(env)
        obs_space = self.env.observation_space
        doc_space = spaces.Dict(
            OrderedDict([
                (str(k), doc)
                for k, (_, doc) in enumerate(obs_space["doc"].spaces.items())
            ]))
        self.observation_space = spaces.Dict(
            OrderedDict([
                ("user", obs_space["user"]),
                ("doc", doc_space),
                ("response", obs_space["response"]),
            ]))
        self._sampled_obs = self.observation_space.sample()

    def observation(self, obs):
        new_obs = OrderedDict()
        new_obs["user"] = obs["user"]
        new_obs["doc"] = {
            str(k): v
            for k, (_, v) in enumerate(obs["doc"].items())
        }
        new_obs["response"] = obs["response"]
        new_obs = convert_element_to_space_type(new_obs, self._sampled_obs)
        return new_obs
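

# Illustration of the reindexing performed by RecSimObservationSpaceWrapper
# (the document IDs below are made up for this example): a raw observation
# such as
#     {"user": ..., "doc": {"17": <doc features>, "42": <doc features>}, ...}
# is rewritten to positional keys, so the declared observation space stays
# fixed across steps:
#     {"user": ..., "doc": {"0": <doc features>, "1": <doc features>}, ...}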


class RecSimResetWrapper(gym.Wrapper):
    """Fix RecSim environment's reset() and close() functions.

    RecSim's reset() function returns an observation without the "response"
    field, breaking RLlib's check. This wrapper fixes that by assigning a
    random "response".

    RecSim's close() function raises NotImplementedError. This wrapper
    changes that behavior to a no-op.
    """

    def __init__(self, env: gym.Env):
        super().__init__(env)
        self._sampled_obs = self.env.observation_space.sample()

    def reset(self):
        obs = super().reset()
        obs["response"] = self.env.observation_space["response"].sample()
        obs = convert_element_to_space_type(obs, self._sampled_obs)
        return obs

    def close(self):
        pass


class MultiDiscreteToDiscreteActionWrapper(gym.ActionWrapper):
    """Convert the action space from MultiDiscrete to Discrete.

    At the moment, RLlib's DQN algorithms only support a Discrete action
    space. This wrapper allows us to apply DQN algorithms to the RecSim
    environment.
    """

    def __init__(self, env: gym.Env):
        super().__init__(env)
        if not isinstance(env.action_space, spaces.MultiDiscrete):
            raise UnsupportedSpaceException(
                f"Action space {env.action_space} "
                f"is not supported by {self.__class__.__name__}")
        self.action_space_dimensions = env.action_space.nvec
        self.action_space = spaces.Discrete(
            np.prod(self.action_space_dimensions))

    def action(self, action: int) -> List[int]:
        """Convert a Discrete action to a MultiDiscrete action."""
        multi_action = [None] * len(self.action_space_dimensions)
        for idx, n in enumerate(self.action_space_dimensions):
            action, dim_action = divmod(action, n)
            multi_action[idx] = dim_action
        return multi_action
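

# Worked example for MultiDiscreteToDiscreteActionWrapper.action(), assuming
# a slate of size 2 drawn from 10 candidates, i.e. MultiDiscrete([10, 10])
# flattened into Discrete(100): the discrete action 23 is decoded digit by
# digit with divmod -- divmod(23, 10) -> (2, 3) fills the first slot and
# divmod(2, 10) -> (0, 2) fills the second, so action 23 maps to the slate
# [3, 2].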


def make_recsim_env(config):
    DEFAULT_ENV_CONFIG = {
        "num_candidates": 10,
        "slate_size": 2,
        "resample_documents": True,
        "seed": 0,
        "convert_to_discrete_action_space": False,
    }
    env_config = DEFAULT_ENV_CONFIG.copy()
    env_config.update(config)
    env = interest_evolution.create_environment(env_config)
    env = RecSimResetWrapper(env)
    env = RecSimObservationSpaceWrapper(env)
    if env_config and env_config["convert_to_discrete_action_space"]:
        env = MultiDiscreteToDiscreteActionWrapper(env)
    return env


env_name = "RecSim-v1"
register_env(name=env_name, env_creator=make_recsim_env)
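

# Minimal usage sketch (not part of the original module): after the
# register_env() call above, the environment can either be referenced from an
# RLlib config by its registered name, e.g. {"env": "RecSim-v1", ...}, or
# created directly via make_recsim_env() and driven with the classic gym API
# this module uses (reset() returns obs; step() returns a 4-tuple), as below.
if __name__ == "__main__":
    env = make_recsim_env({"convert_to_discrete_action_space": True})
    obs = env.reset()
    done = False
    episode_return = 0.0
    while not done:
        # Sample a random flattened slate action and step the wrapped env.
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        episode_return += reward
    print(f"Random-policy episode return: {episode_return}")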