# cartpole_lstm.py

import argparse
import os

from ray.rllib.examples.env.stateless_cartpole import StatelessCartPole
from ray.rllib.utils.test_utils import check_learning_achieved
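
# This example trains an LSTM-wrapped policy on StatelessCartPole, a partially
# observable CartPole variant whose observations omit the velocity components,
# so the agent needs memory to infer them.
#
# Example invocation (a sketch only; flags depend on your setup):
# $ python cartpole_lstm.py --run=PPO --framework=torch --use-prev-action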

parser = argparse.ArgumentParser()
parser.add_argument(
    "--run",
    type=str,
    default="PPO",
    help="The RLlib-registered algorithm to use.")
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "tfe", "torch"],
    default="tf",
    help="The DL framework specifier.")
parser.add_argument("--eager-tracing", action="store_true")
parser.add_argument("--use-prev-action", action="store_true")
parser.add_argument("--use-prev-reward", action="store_true")
parser.add_argument(
    "--as-test",
    action="store_true",
    help="Whether this script should be run as a test: --stop-reward must "
    "be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
    "--stop-iters",
    type=int,
    default=200,
    help="Number of iterations to train.")
parser.add_argument(
    "--stop-timesteps",
    type=int,
    default=100000,
    help="Number of timesteps to train.")
parser.add_argument(
    "--stop-reward",
    type=float,
    default=150.0,
    help="Reward at which we stop training.")

if __name__ == "__main__":
    import ray
    from ray import tune

    args = parser.parse_args()

    ray.init(num_cpus=args.num_cpus or None)

    configs = {
        "PPO": {
            "num_sgd_iter": 5,
            "model": {
                "vf_share_layers": True,
            },
            "vf_loss_coeff": 0.0001,
        },
        "IMPALA": {
            "num_workers": 2,
            "num_gpus": 0,
            "vf_loss_coeff": 0.01,
        },
    }
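
    # Merge the per-algorithm overrides above into the common config below.
    # `use_lstm=True` tells RLlib to auto-wrap the default model with an LSTM
    # of `lstm_cell_size` units; `lstm_use_prev_action` / `lstm_use_prev_reward`
    # additionally feed the previous action / reward into that LSTM.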
    config = dict(
        configs[args.run],
        **{
            "env": StatelessCartPole,
            # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
            "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
            "model": {
                "use_lstm": True,
                "lstm_cell_size": 256,
                "lstm_use_prev_action": args.use_prev_action,
                "lstm_use_prev_reward": args.use_prev_reward,
            },
            "framework": args.framework,
            # Run with tracing enabled for tfe/tf2?
            "eager_tracing": args.eager_tracing,
        })
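
    # Stopping criteria: Tune ends the trial as soon as any one of these
    # thresholds is reached.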
    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }

    # To run the Trainer without tune.run, using our LSTM model and
    # manual state-in handling, do the following:
    # Example (use `config` from the above code):
    # >> import numpy as np
    # >> from ray.rllib.agents.ppo import PPOTrainer
    # >>
    # >> trainer = PPOTrainer(config)
    # >> lstm_cell_size = config["model"]["lstm_cell_size"]
    # >> env = StatelessCartPole()
    # >> obs = env.reset()
    # >>
    # >> # range(2) b/c h- and c-states of the LSTM.
    # >> init_state = state = [
    # ..     np.zeros([lstm_cell_size], np.float32) for _ in range(2)
    # .. ]
    # >> prev_a = 0
    # >> prev_r = 0.0
    # >>
    # >> while True:
    # >>     a, state_out, _ = trainer.compute_single_action(
    # ..         obs, state, prev_a, prev_r)
    # >>     obs, reward, done, _ = env.step(a)
    # >>     if done:
    # >>         obs = env.reset()
    # >>         state = init_state
    # >>         prev_a = 0
    # >>         prev_r = 0.0
    # >>     else:
    # >>         state = state_out
    # >>         prev_a = a
    # >>         prev_r = reward
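    #
    # Note: in the RLlib API version this example targets,
    # `compute_single_action()` returns the RNN state-outs (and the extra
    # action-fetches dict) only when a `state` input is passed in, as above.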

    results = tune.run(args.run, config=config, stop=stop, verbose=2)

    if args.as_test:
        check_learning_achieved(results, args.stop_reward)
    ray.shutdown()