repeat_after_me_env.py

import gym
from gym.spaces import Box, Discrete
import numpy as np


class RepeatAfterMeEnv(gym.Env):
    """Env in which the observation at timestep minus n must be repeated."""

    def __init__(self, config=None):
        config = config or {}
        if config.get("continuous"):
            self.observation_space = Box(-1.0, 1.0, (2,))
        else:
            self.observation_space = Discrete(2)
        self.action_space = self.observation_space
        # Note: Set `repeat_delay` to 0 for simply repeating the seen
        # observation (no delay).
        self.delay = config.get("repeat_delay", 1)
        self.episode_len = config.get("episode_len", 100)
        self.history = []

    def reset(self):
        # Pre-fill the history with zeros so the first `delay` steps have a
        # well-defined (dummy) target.
        self.history = [0] * self.delay
        return self._next_obs()

    def step(self, action):
        # The observation the agent is expected to repeat at this step.
        obs = self.history[-(1 + self.delay)]
        if isinstance(self.action_space, Box):
            # Box: -abs(diff) between action and target observation.
            reward = -np.sum(np.abs(action - obs))
        else:
            # Discrete: +1.0 if exact match, -1.0 otherwise.
            reward = 1.0 if action == obs else -1.0
        done = len(self.history) > self.episode_len
        return self._next_obs(), reward, done, {}

    def _next_obs(self):
        # Sample a new observation token, record it in the history, and
        # return it. Note that the Box case samples from [0, 1), which lies
        # inside the declared [-1, 1] bounds.
        if isinstance(self.observation_space, Box):
            token = np.random.random(size=(2,))
        else:
            token = np.random.choice([0, 1])
        self.history.append(token)
        return token
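
# ---------------------------------------------------------------------------
# A minimal smoke test (an illustrative assumption, not part of the original
# file) exercising both configurations of the env above. The discrete rollout
# uses an "oracle" policy that replays the observation from `delay` steps
# back and should therefore earn +1.0 every step; the continuous rollout just
# samples random actions to show the Box-space (negative absolute error)
# reward.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Discrete config: oracle policy; the return should equal the step count.
    env = RepeatAfterMeEnv({"repeat_delay": 1, "episode_len": 10})
    seen = [0] * env.delay + [env.reset()]  # pad like `reset()` does
    done, total = False, 0.0
    while not done:
        action = seen[-(1 + env.delay)]  # replay the delayed observation
        obs, reward, done, _ = env.step(action)
        seen.append(obs)
        total += reward
    print("discrete oracle return:", total)

    # Continuous config: random actions; each step adds -sum(abs(diff)).
    env = RepeatAfterMeEnv({"continuous": True, "episode_len": 10})
    env.reset()
    done, total = False, 0.0
    while not done:
        obs, reward, done, _ = env.step(env.action_space.sample())
        total += reward
    print("continuous random return:", total)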