parametric_actions_cartpole.py

  1. """Example of handling variable length and/or parametric action spaces.
  2. This is a toy example of the action-embedding based approach for handling large
  3. discrete action spaces (potentially infinite in size), similar to this:
  4. https://neuro.cs.ut.ee/the-use-of-embeddings-in-openai-five/
  5. This currently works with RLlib's policy gradient style algorithms
  6. (e.g., PG, PPO, IMPALA, A2C) and also DQN.
  7. Note that since the model outputs now include "-inf" tf.float32.min
  8. values, not all algorithm options are supported at the moment. For example,
  9. algorithms might crash if they don't properly ignore the -inf action scores.
  10. Working configurations are given below.
  11. """
import argparse
import os

import ray
from ray import tune
from ray.rllib.examples.env.parametric_actions_cartpole import \
    ParametricActionsCartPole
from ray.rllib.examples.models.parametric_actions_model import \
    ParametricActionsModel, TorchParametricActionsModel
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.test_utils import check_learning_achieved
from ray.tune.registry import register_env
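
# A minimal, illustrative sketch of the masking trick described in the module
# docstring. This helper is NOT used by the script and is not part of the
# RLlib API; the names `action_logits` and `action_mask` are placeholders.
# Invalid actions get tf.float32.min added to their logits, so softmax assigns
# them effectively zero probability.
def _masked_logits_sketch(action_logits, action_mask):
    import tensorflow as tf  # local import so torch-only setups are unaffected

    # `action_mask` is a float tensor of 1s (valid) and 0s (invalid).
    # log(1) = 0 leaves valid logits untouched; log(0) = -inf is clipped to
    # tf.float32.min so the values stay representable downstream.
    inf_mask = tf.maximum(tf.math.log(action_mask), tf.float32.min)
    return action_logits + inf_mask
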

parser = argparse.ArgumentParser()
parser.add_argument(
    "--run",
    type=str,
    default="PPO",
    help="The RLlib-registered algorithm to use.")
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "tfe", "torch"],
    default="tf",
    help="The DL framework specifier.")
parser.add_argument(
    "--as-test",
    action="store_true",
    help="Whether this script should be run as a test: --stop-reward must "
    "be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
    "--stop-iters",
    type=int,
    default=200,
    help="Number of iterations to train.")
parser.add_argument(
    "--stop-timesteps",
    type=int,
    default=100000,
    help="Number of timesteps to train.")
parser.add_argument(
    "--stop-reward",
    type=float,
    default=150.0,
    help="Reward at which we stop training.")

if __name__ == "__main__":
    args = parser.parse_args()
    ray.init()

    # The env's observations bundle an action mask and per-action embeddings
    # (here for up to 10 actions) together with the usual cart state.
    register_env("pa_cartpole", lambda _: ParametricActionsCartPole(10))

    # Pick the mask-aware custom model matching the chosen framework.
    ModelCatalog.register_custom_model(
        "pa_model", TorchParametricActionsModel
        if args.framework == "torch" else ParametricActionsModel)

    if args.run == "DQN":
        cfg = {
            # TODO(ekl) we need to set these to prevent the masked values
            # from being further processed in DistributionalQModel, which
            # would mess up the masking. It is possible to support these if we
            # defined a custom DistributionalQModel that is aware of masking.
            "hiddens": [],
            "dueling": False,
        }
    else:
        cfg = {}

    config = dict(
        {
            "env": "pa_cartpole",
            "model": {
                "custom_model": "pa_model",
            },
            # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
            "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
            "num_workers": 0,
            "framework": args.framework,
        },
        **cfg)

    # Tune stops a trial as soon as any one of these criteria is met.
    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }

    results = tune.run(args.run, stop=stop, config=config, verbose=1)

    if args.as_test:
        check_learning_achieved(results, args.stop_reward)

    ray.shutdown()