parametric_actions_cartpole_embeddings_learnt_by_model.py

  1. """Example of handling variable length and/or parametric action spaces.
  2. This is a toy example of the action-embedding based approach for handling large
  3. discrete action spaces (potentially infinite in size), similar to this:
  4. https://neuro.cs.ut.ee/the-use-of-embeddings-in-openai-five/
  5. This currently works with RLlib's policy gradient style algorithms
  6. (e.g., PG, PPO, IMPALA, A2C) and also DQN.
  7. Note that since the model outputs now include "-inf" tf.float32.min
  8. values, not all algorithm options are supported at the moment. For example,
  9. algorithms might crash if they don't properly ignore the -inf action scores.
  10. Working configurations are given below.
  11. """
import argparse
import os

import ray
from ray import tune
from ray.rllib.examples.env.parametric_actions_cartpole import \
    ParametricActionsCartPoleNoEmbeddings
from ray.rllib.examples.models.parametric_actions_model import \
    ParametricActionsModelThatLearnsEmbeddings
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.test_utils import check_learning_achieved
from ray.tune.registry import register_env

parser = argparse.ArgumentParser()
parser.add_argument("--run", type=str, default="PPO")
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "tfe"],
    default="tf",
    help="The DL framework specifier (torch not supported yet "
    "due to lack of model).")
parser.add_argument("--as-test", action="store_true")
parser.add_argument("--stop-iters", type=int, default=200)
parser.add_argument("--stop-reward", type=float, default=150.0)
parser.add_argument("--stop-timesteps", type=int, default=100000)
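
# For reference, a minimal sketch (hypothetical helper, NOT used by this
# script) of the "-inf" masking trick mentioned in the module docstring:
# invalid actions receive tf.float32.min so their softmax probability becomes
# ~0, while valid actions keep their original logits.
def _masked_logits_sketch(logits, action_mask):
    import tensorflow as tf  # Local import, only needed for this sketch.

    # action_mask holds 1.0 for valid and 0.0 for invalid actions.
    # log(1.0) = 0.0 (logit unchanged); log(0.0) = -inf, clipped to
    # tf.float32.min to stay finite.
    inf_mask = tf.maximum(tf.math.log(action_mask), tf.float32.min)
    return logits + inf_mask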

if __name__ == "__main__":
    args = parser.parse_args()
    ray.init()

    register_env("pa_cartpole",
                 lambda _: ParametricActionsCartPoleNoEmbeddings(10))
    ModelCatalog.register_custom_model(
        "pa_model", ParametricActionsModelThatLearnsEmbeddings)

    if args.run == "DQN":
        cfg = {
            # TODO(ekl) we need to set these to prevent the masked values
            # from being further processed in DistributionalQModel, which
            # would mess up the masking. It is possible to support these if we
            # defined a custom DistributionalQModel that is aware of masking.
            "hiddens": [],
            "dueling": False,
        }
    else:
        cfg = {}

    config = dict(
        {
            "env": "pa_cartpole",
            "model": {
                "custom_model": "pa_model",
            },
            # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
            "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
            "num_workers": 0,
            "framework": args.framework,
        },
        # Merge in the algorithm-specific settings from above (DQN disables
        # "hiddens" and "dueling"; other algorithms use an empty dict).
        **cfg)

    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }
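    # Tune stops the trial as soon as ANY of the above criteria is met
    # (iteration count, total timesteps, or mean episode reward).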

    results = tune.run(args.run, stop=stop, config=config, verbose=2)

    if args.as_test:
        check_learning_achieved(results, args.stop_reward)

    ray.shutdown()