# atari_wrappers.py

from collections import deque

import gym
from gym import spaces
import numpy as np

from ray.rllib.utils.images import rgb2gray, resize


def is_atari(env):
    if (hasattr(env.observation_space, "shape")
            and env.observation_space.shape is not None
            and len(env.observation_space.shape) <= 2):
        return False
    return hasattr(env, "unwrapped") and hasattr(env.unwrapped, "ale")
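

# Usage sketch (assumes the Atari ROMs are installed, e.g. via
# `pip install "gym[atari]"`; the env ids below are illustrative):
#     is_atari(gym.make("PongNoFrameskip-v4"))  # -> True (image obs + ALE)
#     is_atari(gym.make("CartPole-v1"))         # -> False (1-D observation)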


def get_wrapper_by_cls(env, cls):
    """Returns the gym env wrapper of the given class, or None."""
    currentenv = env
    while True:
        if isinstance(currentenv, cls):
            return currentenv
        elif isinstance(currentenv, gym.Wrapper):
            currentenv = currentenv.env
        else:
            return None
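

# Example (sketch): after wrapping, an inner wrapper can be looked up by class
# while walking the wrapper stack from the outside in, e.g.
#     monitor = get_wrapper_by_cls(env, MonitorEnv)
# which returns None if no wrapper of that class is present.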


class MonitorEnv(gym.Wrapper):
    def __init__(self, env=None):
        """Record episode stats prior to EpisodicLifeEnv, etc."""
        gym.Wrapper.__init__(self, env)
        self._current_reward = None
        self._num_steps = None
        self._total_steps = None
        self._episode_rewards = []
        self._episode_lengths = []
        self._num_episodes = 0
        self._num_returned = 0

    def reset(self, **kwargs):
        obs = self.env.reset(**kwargs)

        if self._total_steps is None:
            self._total_steps = sum(self._episode_lengths)

        if self._current_reward is not None:
            self._episode_rewards.append(self._current_reward)
            self._episode_lengths.append(self._num_steps)
            self._num_episodes += 1

        self._current_reward = 0
        self._num_steps = 0

        return obs

    def step(self, action):
        obs, rew, done, info = self.env.step(action)
        self._current_reward += rew
        self._num_steps += 1
        self._total_steps += 1
        return (obs, rew, done, info)

    def get_episode_rewards(self):
        return self._episode_rewards

    def get_episode_lengths(self):
        return self._episode_lengths

    def get_total_steps(self):
        return self._total_steps

    def next_episode_results(self):
        for i in range(self._num_returned, len(self._episode_rewards)):
            yield (self._episode_rewards[i], self._episode_lengths[i])
        self._num_returned = len(self._episode_rewards)
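

# Consumption sketch: each call to `next_episode_results()` yields only the
# episodes recorded since the previous call (an episode is logged on the reset
# that follows it), so a caller can poll it periodically, e.g.
#     for ep_reward, ep_length in monitor.next_episode_results():
#         print(ep_reward, ep_length)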


class NoopResetEnv(gym.Wrapper):
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.

        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == "NOOP"

    def reset(self, **kwargs):
        """Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)


class ClipRewardEnv(gym.RewardWrapper):
    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)


class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset.

        For environments that are fixed until firing.
        """
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == "FIRE"
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)


class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game
        over. Done by DeepMind for the DQN and co. since it helps value
        estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        self.was_real_done = True

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # Check current lives, make loss of life terminal, then update lives
        # to handle bonus lives.
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # For Qbert, sometimes we stay in the lives == 0 condition for a
            # few frames, so it's important to keep lives > 0 so that we only
            # reset once the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info

    def reset(self, **kwargs):
        """Reset only when lives are exhausted.

        This way all states are still reachable even though lives are
        episodic, and the learner need not know about any of this
        behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # No-op step to advance from terminal/lost-life state.
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs


class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame."""
        gym.Wrapper.__init__(self, env)
        # Most recent raw observations (for max pooling across time steps).
        self._obs_buffer = np.zeros(
            (2, ) + env.observation_space.shape, dtype=np.uint8)
        self._skip = skip

    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            if i == self._skip - 2:
                self._obs_buffer[0] = obs
            if i == self._skip - 1:
                self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame doesn't matter.
        max_frame = self._obs_buffer.max(axis=0)

        return max_frame, total_reward, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
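

# Note: the pixel-wise max over the last two raw frames mirrors DeepMind's
# Atari preprocessing; some games render sprites only on alternating frames,
# and max-pooling two consecutive frames removes that flicker.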


class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, dim):
        """Warp frames to the specified size (dim x dim)."""
        gym.ObservationWrapper.__init__(self, env)
        self.width = dim
        self.height = dim
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(self.height, self.width, 1),
            dtype=np.uint8)

    def observation(self, frame):
        frame = rgb2gray(frame)
        frame = resize(frame, height=self.height, width=self.width)
        return frame[:, :, None]


# TODO: (sven) Deprecated class. Remove once traj. view is the norm.
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames."""
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(shp[0], shp[1], shp[2] * k),
            dtype=env.observation_space.dtype)

    def reset(self):
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        assert len(self.frames) == self.k
        return np.concatenate(self.frames, axis=2)
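

# Shape sketch: with WarpFrame(dim=84) as the wrapped env and k=4, FrameStack
# emits uint8 observations of shape (84, 84, 4), i.e. the last four grayscale
# frames concatenated along the channel axis.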


class FrameStackTrajectoryView(gym.ObservationWrapper):
    def __init__(self, env):
        """No stacking. Trajectory View API takes care of this."""
        gym.ObservationWrapper.__init__(self, env)
        shp = env.observation_space.shape
        assert shp[2] == 1
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(shp[0], shp[1]),
            dtype=env.observation_space.dtype)

    def observation(self, observation):
        return np.squeeze(observation, axis=-1)


class ScaledFloatFrame(gym.ObservationWrapper):
    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.observation_space = gym.spaces.Box(
            low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)

    def observation(self, observation):
        # Careful! This undoes the memory optimization; use with smaller
        # replay buffers only.
        return np.array(observation).astype(np.float32) / 255.0


def wrap_deepmind(env, dim=84, framestack=True):
    """Configure environment for DeepMind-style Atari.

    Note that we assume reward clipping is done outside the wrapper.

    Args:
        env (EnvType): The env object to wrap.
        dim (int): Dimension to resize observations to (dim x dim).
        framestack (bool): Whether to framestack observations.
    """
    env = MonitorEnv(env)
    env = NoopResetEnv(env, noop_max=30)
    if env.spec is not None and "NoFrameskip" in env.spec.id:
        env = MaxAndSkipEnv(env, skip=4)
    env = EpisodicLifeEnv(env)
    if "FIRE" in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env, dim)
    # env = ScaledFloatFrame(env)  # TODO: use for dqn?
    # env = ClipRewardEnv(env)  # Reward clipping is handled by policy eval.
    # 4x image framestacking.
    if framestack is True:
        env = FrameStack(env, 4)
    return env
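

if __name__ == "__main__":
    # Minimal usage sketch, assuming the Atari ROMs are available (e.g. via
    # `pip install "gym[atari]"`) and "PongNoFrameskip-v4" is registered.
    raw_env = gym.make("PongNoFrameskip-v4")
    env = wrap_deepmind(raw_env, dim=84, framestack=True)
    obs = env.reset()
    print(obs.shape, obs.dtype)  # Expected: (84, 84, 4) uint8.
    obs, reward, done, info = env.step(env.action_space.sample())
    print(reward, done)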