ant_rand_goal.py

from gym.envs.mujoco.mujoco_env import MujocoEnv
from gym.utils import EzPickle
import numpy as np

from ray.rllib.env.apis.task_settable_env import TaskSettableEnv


class AntRandGoalEnv(EzPickle, MujocoEnv, TaskSettableEnv):
    """Ant Environment that randomizes goals as tasks.

    Goals are randomly sampled 2D positions.
    """

    def __init__(self):
        self.set_task(self.sample_tasks(1)[0])
        MujocoEnv.__init__(self, "ant.xml", 5)
        EzPickle.__init__(self)
    def sample_tasks(self, n_tasks):
        # Sample `n_tasks` goal positions (2D vectors), uniformly distributed
        # over a disk of radius 3 around the origin.
        a = np.random.random(n_tasks) * 2 * np.pi
        r = 3 * np.random.random(n_tasks) ** 0.5
        return np.stack((r * np.cos(a), r * np.sin(a)), axis=-1)
    def set_task(self, task):
        """
        Args:
            task: task of the meta-learning environment
        """
        self.goal_pos = task

    def get_task(self):
        """
        Returns:
            task: task of the meta-learning environment
        """
        return self.goal_pos
    def step(self, a):
        self.do_simulation(a, self.frame_skip)
        xposafter = self.get_body_com("torso")
        # Negative L1 distance of the torso's xy-position to the goal.
        goal_reward = -np.sum(
            np.abs(xposafter[:2] - self.goal_pos))  # make it happy, not suicidal
        ctrl_cost = 0.1 * np.square(a).sum()
        contact_cost = 0.5 * 1e-3 * np.sum(
            np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
        # survive_reward = 1.0
        survive_reward = 0.0
        reward = goal_reward - ctrl_cost - contact_cost + survive_reward
        # notdone = np.isfinite(state).all() and 1.0 >= state[2] >= 0.
        # done = not notdone
        done = False
        ob = self._get_obs()
        return ob, reward, done, dict(
            reward_forward=goal_reward,
            reward_ctrl=-ctrl_cost,
            reward_contact=-contact_cost,
            reward_survive=survive_reward)
    def _get_obs(self):
        return np.concatenate([
            self.sim.data.qpos.flat,
            self.sim.data.qvel.flat,
            np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
        ])
    def reset_model(self):
        qpos = self.init_qpos + self.np_random.uniform(
            size=self.model.nq, low=-0.1, high=0.1)
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * 0.1
        self.set_state(qpos, qvel)
        return self._get_obs()
    def viewer_setup(self):
        self.viewer.cam.distance = self.model.stat.extent * 0.5
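

# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): shows how the
# task-settable API above is typically exercised in a meta-learning loop.
# Assumes gym with MuJoCo support is installed and the standard "ant.xml"
# asset is available; the 3 tasks and 100-step rollouts are arbitrary choices.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    env = AntRandGoalEnv()

    # Draw a few goal positions (tasks) and roll out one short episode per
    # task with random actions.
    for task in env.sample_tasks(3):
        env.set_task(task)
        ob = env.reset()
        episode_reward = 0.0
        for _ in range(100):
            action = env.action_space.sample()
            ob, reward, done, info = env.step(action)
            episode_reward += reward
            if done:
                break
        print("goal:", env.get_task(), "episode reward:", episode_reward)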