remote_vector_env_with_custom_api.py

"""
This script demonstrates how one can specify custom env APIs in
combination with RLlib's `remote_worker_envs` setting, which
parallelizes individual sub-envs within a vector env by making each
one a ray Actor.

You can access your Env's API via a custom callback as shown below.
"""
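# Example invocation (illustrative only; the exact flag values are an
# assumption, not prescribed by this script):
#   python remote_vector_env_with_custom_api.py --num-envs-per-worker=4 \
#       --framework=torch
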
import argparse
import os

import gym
import ray
from ray import tune
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env.apis.task_settable_env import TaskSettableEnv
from ray.rllib.utils.test_utils import check_learning_achieved

parser = argparse.ArgumentParser()
parser.add_argument(
    "--run",
    type=str,
    default="PPO",
    help="The RLlib-registered algorithm to use.")
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "tfe", "torch"],
    default="tf",
    help="The DL framework specifier.")
parser.add_argument("--num-workers", type=int, default=1)
# This should be > 1; otherwise, remote envs make no sense.
parser.add_argument("--num-envs-per-worker", type=int, default=4)
parser.add_argument(
    "--as-test",
    action="store_true",
    help="Whether this script should be run as a test: --stop-reward must "
    "be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
    "--stop-iters",
    type=int,
    default=50,
    help="Number of iterations to train.")
parser.add_argument(
    "--stop-timesteps",
    type=int,
    default=100000,
    help="Number of timesteps to train.")
parser.add_argument(
    "--stop-reward",
    type=float,
    default=180.0,
    help="Reward at which we stop training.")


class NonVectorizedEnvToBeVectorizedIntoRemoteVectorEnv(TaskSettableEnv):
    """Class for a single sub-env to be vectorized into a RemoteVectorEnv.

    If you specify this class directly under the "env" config key, RLlib
    will auto-wrap `num_envs_per_worker` copies of it into a (remote)
    vector env for you.

    Note that you may implement your own custom APIs. Here, we demonstrate
    using RLlib's TaskSettableEnv API (which is a simple sub-class
    of gym.Env).
    """

    def __init__(self, config=None):
        self.action_space = gym.spaces.Box(0, 1, shape=(1, ))
        self.observation_space = gym.spaces.Box(0, 1, shape=(2, ))
        self.task = 1

    def reset(self):
        self.steps = 0
        return self.observation_space.sample()

    def step(self, action):
        self.steps += 1
        return self.observation_space.sample(), 0, self.steps > 10, {}

    def set_task(self, task) -> None:
        """We can set the task of each sub-env (ray actor)."""
        print("Task set to {}".format(task))
        self.task = task
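
    # `get_task` is not used by this example, but it is also part of the
    # TaskSettableEnv API. A minimal sketch, assuming that simply returning
    # the stored task is sufficient here:
    def get_task(self):
        """Return the currently active task of this sub-env."""
        return self.task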


class TaskSettingCallback(DefaultCallbacks):
    """Custom callback to verify that we can set the task on each remote
    sub-env.
    """

    def on_train_result(self, *, trainer, result: dict, **kwargs) -> None:
        """Curriculum learning as seen in the Ray docs."""
        if result["episode_reward_mean"] > 0.0:
            phase = 0
        else:
            phase = 1
        # Sub-envs are now ray.actor.ActorHandles, so we have to add
        # `remote()` here.
        trainer.workers.foreach_env(lambda env: env.set_task.remote(phase))
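

# Illustrative helper (not part of the original example): because the
# sub-envs are ray.actor.ActorHandles (see the callback above), reading a
# value back from them also goes through `.remote()`, and the returned
# object refs must be resolved via `ray.get()`. This assumes the
# `get_task()` sketch defined on the env class above.
def get_tasks_of_remote_sub_envs(workers):
    """Return the current task of every remote sub-env in a WorkerSet.

    Usage (hypothetical): `get_tasks_of_remote_sub_envs(trainer.workers)`.
    """
    return workers.foreach_env(lambda env: ray.get(env.get_task.remote()))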


if __name__ == "__main__":
    args = parser.parse_args()

    ray.init(num_cpus=6)

    config = {
        # Specify your custom (single, non-vectorized) env directly as a
        # class. This way, RLlib can auto-create Actors from this class
        # and handle everything correctly.
        "env": NonVectorizedEnvToBeVectorizedIntoRemoteVectorEnv,
        # Set up our own callbacks.
        "callbacks": TaskSettingCallback,
        # Force sub-envs to be ray.actor.ActorHandles, so we can step
        # through them in parallel.
        "remote_worker_envs": True,
        # How many RolloutWorkers (each with n environment copies:
        # `num_envs_per_worker`)?
        "num_workers": args.num_workers,
        # How many sub-envs each RolloutWorker should create; with
        # `remote_worker_envs=True`, each one becomes its own Ray actor
        # and is stepped in parallel.
        "num_envs_per_worker": args.num_envs_per_worker,
        # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
        "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
        "framework": args.framework,
    }

    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }

    results = tune.run(args.run, config=config, stop=stop, verbose=1)

    if args.as_test:
        check_learning_achieved(results, args.stop_reward)
    ray.shutdown()