ant_rand_goal.py

from gymnasium.envs.mujoco.mujoco_env import MujocoEnv
from gymnasium.utils import EzPickle
import numpy as np

from ray.rllib.env.apis.task_settable_env import TaskSettableEnv


class AntRandGoalEnv(EzPickle, MujocoEnv, TaskSettableEnv):
    """Ant Environment that randomizes goals as tasks.

    Goals are randomly sampled 2D positions.
    """

    def __init__(self):
        # Pick an initial goal before the MuJoCo model and spaces are built.
        self.set_task(self.sample_tasks(1)[0])
        MujocoEnv.__init__(self, "ant.xml", 5)
        EzPickle.__init__(self)

    def sample_tasks(self, n_tasks):
        # Samples a goal position (2x1 position vector)
        a = np.random.random(n_tasks) * 2 * np.pi
        r = 3 * np.random.random(n_tasks) ** 0.5
        return np.stack((r * np.cos(a), r * np.sin(a)), axis=-1)
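
    # In sample_tasks above, the angle a is uniform on [0, 2*pi) and the
    # radius is 3 * sqrt(u) with u ~ Uniform(0, 1), so the returned goals are
    # distributed uniformly over a disk of radius 3 centered at the origin
    # (the square root offsets the linear growth of area with radius).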

    def set_task(self, task):
        """
        Args:
            task: task of the meta-learning environment
        """
        self.goal_pos = task

    def get_task(self):
        """
        Returns:
            task: task of the meta-learning environment
        """
        return self.goal_pos

    def step(self, a):
        self.do_simulation(a, self.frame_skip)
        xposafter = self.get_body_com("torso")
        # Negative L1 distance from the torso's (x, y) position to the goal.
        goal_reward = -np.sum(
            np.abs(xposafter[:2] - self.goal_pos)
        )  # make it happy, not suicidal
        ctrl_cost = 0.1 * np.square(a).sum()
        contact_cost = (
            0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
        )
        # survive_reward = 1.0
        survive_reward = 0.0
        reward = goal_reward - ctrl_cost - contact_cost + survive_reward
        # notdone = np.isfinite(state).all() and 1.0 >= state[2] >= 0.
        # done = not notdone
        done = False
        ob = self._get_obs()
        # Old Gym-style 4-tuple: (observation, reward, done, info).
        return (
            ob,
            reward,
            done,
            dict(
                reward_forward=goal_reward,
                reward_ctrl=-ctrl_cost,
                reward_contact=-contact_cost,
                reward_survive=survive_reward,
            ),
        )

    def _get_obs(self):
        # Observation: joint positions, joint velocities, and clipped external
        # contact forces, concatenated into a single flat vector.
        return np.concatenate(
            [
                self.sim.data.qpos.flat,
                self.sim.data.qvel.flat,
                np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
            ]
        )

    def reset_model(self):
        qpos = self.init_qpos + self.np_random.uniform(
            size=self.model.nq, low=-0.1, high=0.1
        )
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * 0.1
        self.set_state(qpos, qvel)
        return self._get_obs()

    def viewer_setup(self):
        self.viewer.cam.distance = self.model.stat.extent * 0.5
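

# Minimal usage sketch of the task-settable API defined above. It assumes the
# environment constructs successfully (the class uses the older two-argument
# MujocoEnv constructor, so a matching Gym/Gymnasium + MuJoCo setup is
# assumed); the block below is illustrative only.
if __name__ == "__main__":
    env = AntRandGoalEnv()
    tasks = env.sample_tasks(5)  # (5, 2) array of goals in a radius-3 disk
    env.set_task(tasks[0])  # use the first sampled goal from now on
    print("current goal:", env.get_task())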