custom_rnn_model.py

  1. """Example of using a custom RNN keras model."""

import argparse
import os

import ray
from ray import air, tune
from ray.tune.registry import register_env
from ray.rllib.examples.env.repeat_after_me_env import RepeatAfterMeEnv
from ray.rllib.examples.env.repeat_initial_obs_env import RepeatInitialObsEnv
from ray.rllib.examples.models.rnn_model import RNNModel, TorchRNNModel
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.test_utils import check_learning_achieved
from ray.tune.registry import get_trainable_cls
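
# The custom models imported above (``RNNModel`` for tf, ``TorchRNNModel`` for
# torch) implement RLlib's RecurrentNetwork (ModelV2) API. A minimal torch-style
# sketch of that interface could look roughly like the following. This is a
# simplified illustration with assumed class/attribute names and a flat (Box)
# observation space, not the exact implementation shipped in
# ``ray.rllib.examples.models.rnn_model``:
#
#   import numpy as np
#   import torch
#   from torch import nn
#   from ray.rllib.models.torch.recurrent_net import RecurrentNetwork
#
#   class MyRNNModel(RecurrentNetwork, nn.Module):
#       def __init__(self, obs_space, action_space, num_outputs, model_config, name):
#           nn.Module.__init__(self)
#           super().__init__(obs_space, action_space, num_outputs, model_config, name)
#           self.cell_size = model_config["custom_model_config"]["cell_size"]
#           self.obs_size = int(np.prod(obs_space.shape))  # assumes a flat Box obs
#           self.lstm = nn.LSTM(self.obs_size, self.cell_size, batch_first=True)
#           self.logits_branch = nn.Linear(self.cell_size, num_outputs)
#           self.value_branch = nn.Linear(self.cell_size, 1)
#           self._features = None
#
#       def get_initial_state(self):
#           # Two state tensors: the LSTM's h- and c-state (hence range(2) in the
#           # manual-inference example further below).
#           w = self.logits_branch.weight
#           return [w.new_zeros(self.cell_size), w.new_zeros(self.cell_size)]
#
#       def forward_rnn(self, inputs, state, seq_lens):
#           # inputs: [B, T, obs_size]; state: [h, c], each of shape [B, cell_size].
#           h_in, c_in = state[0].unsqueeze(0), state[1].unsqueeze(0)
#           self._features, (h, c) = self.lstm(inputs, (h_in, c_in))
#           return self.logits_branch(self._features), [h.squeeze(0), c.squeeze(0)]
#
#       def value_function(self):
#           return torch.reshape(self.value_branch(self._features), [-1])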

parser = argparse.ArgumentParser()
parser.add_argument(
    "--run", type=str, default="PPO", help="The RLlib-registered algorithm to use."
)
parser.add_argument("--env", type=str, default="RepeatAfterMeEnv")
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "torch"],
    default="torch",
    help="The DL framework specifier.",
)
parser.add_argument(
    "--as-test",
    action="store_true",
    help="Whether this script should be run as a test: --stop-reward must "
    "be achieved within --stop-timesteps AND --stop-iters.",
)
parser.add_argument(
    "--stop-iters", type=int, default=100, help="Number of iterations to train."
)
parser.add_argument(
    "--stop-timesteps", type=int, default=100000, help="Number of timesteps to train."
)
parser.add_argument(
    "--stop-reward", type=float, default=90.0, help="Reward at which we stop training."
)
parser.add_argument(
    "--local-mode",
    action="store_true",
    help="Init Ray in local mode for easier debugging.",
)
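
# For example, the script could be invoked as follows (assuming ``ray[rllib]``
# and the chosen framework, e.g. torch, are installed):
#
#   python custom_rnn_model.py --run=PPO --framework=torch --stop-reward=90 --as-test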

if __name__ == "__main__":
    args = parser.parse_args()

    ray.init(num_cpus=args.num_cpus or None, local_mode=args.local_mode)

    ModelCatalog.register_custom_model(
        "rnn", TorchRNNModel if args.framework == "torch" else RNNModel
    )
    register_env("RepeatAfterMeEnv", lambda c: RepeatAfterMeEnv(c))
    register_env("RepeatInitialObsEnv", lambda _: RepeatInitialObsEnv())

    config = (
        get_trainable_cls(args.run)
        .get_default_config()
        .environment(args.env, env_config={"repeat_delay": 2})
        .framework(args.framework)
        .rollouts(num_rollout_workers=0, num_envs_per_worker=20)
        .training(
            model={
                "custom_model": "rnn",
                "max_seq_len": 20,
                "custom_model_config": {
                    "cell_size": 32,
                },
            },
            gamma=0.9,
            # TODO (Kourosh): Enable when LSTMs are supported.
            _enable_learner_api=False,
        )
        # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
        .resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0")))
        .rl_module(_enable_rl_module_api=False)
    )

    if args.run == "PPO":
        config.training(entropy_coeff=0.001, num_sgd_iter=5, vf_loss_coeff=1e-5)

    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }

    # To run the Algorithm without ``Tuner.fit()``, using our RNN model and
    # manual state-in handling, do the following:
    # Example (use `config` from the above code):
    # >> import numpy as np
    # >> from ray.rllib.algorithms.ppo import PPO
    # >>
    # >> algo = config.build()
    # >> lstm_cell_size = config.model["custom_model_config"]["cell_size"]
    # >> env = RepeatAfterMeEnv({})
    # >> obs, info = env.reset()
    # >>
    # >> # range(2) b/c h- and c-states of the LSTM.
    # >> init_state = state = [
    # ..     np.zeros([lstm_cell_size], np.float32) for _ in range(2)
    # .. ]
    # >>
    # >> while True:
    # >>     a, state_out, _ = algo.compute_single_action(obs, state)
    # >>     obs, reward, done, _, _ = env.step(a)
    # >>     if done:
    # >>         obs, info = env.reset()
    # >>         state = init_state
    # >>     else:
    # >>         state = state_out

    tuner = tune.Tuner(
        args.run,
        param_space=config.to_dict(),
        run_config=air.RunConfig(stop=stop, verbose=1),
    )
    results = tuner.fit()

    if args.as_test:
        check_learning_achieved(results, args.stop_reward)
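
    # The ``ResultGrid`` returned by ``tuner.fit()`` could also be inspected
    # directly. A rough sketch (assuming Ray Tune's ``ResultGrid.get_best_result()``
    # API; the metric name matches the ``stop`` criteria above):
    #
    #   best = results.get_best_result(metric="episode_reward_mean", mode="max")
    #   print(best.metrics["episode_reward_mean"])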

    ray.shutdown()