custom_rnn_model.py

  1. """Example of using a custom RNN keras model."""
import argparse
import os

import ray
from ray import tune
from ray.tune.registry import register_env
from ray.rllib.examples.env.repeat_after_me_env import RepeatAfterMeEnv
from ray.rllib.examples.env.repeat_initial_obs_env import RepeatInitialObsEnv
from ray.rllib.examples.models.rnn_model import RNNModel, TorchRNNModel
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.test_utils import check_learning_achieved
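
# Command-line options: algorithm to run, environment, framework, and the
# stop criteria used both for training and for the --as-test learning check.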
parser = argparse.ArgumentParser()
parser.add_argument(
    "--run",
    type=str,
    default="PPO",
    help="The RLlib-registered algorithm to use.")
parser.add_argument("--env", type=str, default="RepeatAfterMeEnv")
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "tfe", "torch"],
    default="tf",
    help="The DL framework specifier.")
parser.add_argument(
    "--as-test",
    action="store_true",
    help="Whether this script should be run as a test: --stop-reward must "
    "be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
    "--stop-iters",
    type=int,
    default=100,
    help="Number of iterations to train.")
parser.add_argument(
    "--stop-timesteps",
    type=int,
    default=100000,
    help="Number of timesteps to train.")
parser.add_argument(
    "--stop-reward",
    type=float,
    default=90.0,
    help="Reward at which we stop training.")

if __name__ == "__main__":
    args = parser.parse_args()

    ray.init(num_cpus=args.num_cpus or None)
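
    # Register the custom model under the name "rnn" (torch or tf version,
    # depending on --framework) so it can be referenced in the config below.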
    ModelCatalog.register_custom_model(
        "rnn", TorchRNNModel if args.framework == "torch" else RNNModel)
    register_env("RepeatAfterMeEnv", lambda c: RepeatAfterMeEnv(c))
    register_env("RepeatInitialObsEnv", lambda _: RepeatInitialObsEnv())
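
    # PPO config: the custom model is selected via `custom_model`, and
    # `custom_model_config` is passed through to the model's constructor.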
    config = {
        "env": args.env,
        "env_config": {
            "repeat_delay": 2,
        },
        "gamma": 0.9,
        # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
        "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
        "num_workers": 0,
        "num_envs_per_worker": 20,
        "entropy_coeff": 0.001,
        "num_sgd_iter": 5,
        "vf_loss_coeff": 1e-5,
        "model": {
            "custom_model": "rnn",
            "max_seq_len": 20,
            "custom_model_config": {
                "cell_size": 32,
            },
        },
        "framework": args.framework,
    }
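
    # Stop criteria for tune.run(): a trial stops as soon as any one of
    # these thresholds is reached.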
    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }

    # To run the Trainer without tune.run, using our RNN model and
    # manual state-in handling, do the following:

    # Example (use `config` from the above code):
    # >> import numpy as np
    # >> from ray.rllib.agents.ppo import PPOTrainer
    # >>
    # >> trainer = PPOTrainer(config)
    # >> lstm_cell_size = config["model"]["custom_model_config"]["cell_size"]
    # >> env = RepeatAfterMeEnv({})
    # >> obs = env.reset()
    # >>
    # >> # range(2) b/c h- and c-states of the LSTM.
    # >> init_state = state = [
    # ..    np.zeros([lstm_cell_size], np.float32) for _ in range(2)
    # .. ]
    # >>
    # >> while True:
    # >>     a, state_out, _ = trainer.compute_single_action(obs, state)
    # >>     obs, reward, done, _ = env.step(a)
    # >>     if done:
    # >>         obs = env.reset()
    # >>         state = init_state
    # >>     else:
    # >>         state = state_out
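
    # Train with Tune; `results` holds the trial data used by the optional
    # learning check below.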
    results = tune.run(args.run, config=config, stop=stop, verbose=1)

    if args.as_test:
        check_learning_achieved(results, args.stop_reward)

    ray.shutdown()