vizdoom_with_attention_net.py

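"""Example of using an attention (transformer) model with RLlib on ViZDoom.

Trains the algorithm given via --run (default: PPO) on the VizdoomBasic-v0
environment, wrapping the policy model with RLlib's attention net
(`use_attention=True`). The VizdoomBasic-v0 env must be registered with gym
before running, e.g. via the `vizdoomgym` package.
"""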
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument(
    "--run",
    type=str,
    default="PPO",
    help="The RLlib-registered algorithm to use.")
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "tfe", "torch"],
    default="tf",
    help="The DL framework specifier.")
parser.add_argument(
    "--from-checkpoint",
    type=str,
    default=None,
    help="Full path to a checkpoint file for restoring a previously saved "
    "Trainer state.")
parser.add_argument("--num-workers", type=int, default=0)
parser.add_argument(
    "--use-n-prev-actions",
    type=int,
    default=0,
    help="How many of the previous actions to use as attention input.")
parser.add_argument(
    "--use-n-prev-rewards",
    type=int,
    default=0,
    help="How many of the previous rewards to use as attention input.")
parser.add_argument("--stop-iters", type=int, default=9999)
parser.add_argument("--stop-timesteps", type=int, default=100000000)
parser.add_argument("--stop-reward", type=float, default=1000.0)

if __name__ == "__main__":
    import ray
    from ray import tune

    args = parser.parse_args()

    ray.init(num_cpus=args.num_cpus or None)

    config = {
        "env": "VizdoomBasic-v0",
        # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
        "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
  44. "model": {
  45. "conv_filters": [],
  46. "use_attention": True,
  47. "attention_num_transformer_units": 1,
  48. "attention_dim": 64,
  49. "attention_num_heads": 2,
  50. "attention_memory_inference": 100,
  51. "attention_memory_training": 50,
  52. "vf_share_layers": True,
  53. "attention_use_n_prev_actions": args.use_n_prev_actions,
  54. "attention_use_n_prev_rewards": args.use_n_prev_rewards,
  55. },
  56. "framework": args.framework,
  57. # Run with tracing enabled for tfe/tf2.
  58. "eager_tracing": args.framework in ["tfe", "tf2"],
  59. "num_workers": args.num_workers,
  60. "vf_loss_coeff": 0.01,
  61. }
  62. stop = {
  63. "training_iteration": args.stop_iters,
  64. "timesteps_total": args.stop_timesteps,
  65. "episode_reward_mean": args.stop_reward,
  66. }
  67. results = tune.run(
  68. args.run,
  69. config=config,
  70. stop=stop,
  71. verbose=2,
  72. checkpoint_freq=5,
  73. checkpoint_at_end=True,
  74. restore=args.from_checkpoint,
  75. )
  76. print(results)
  77. ray.shutdown()
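
# Example invocation (assumes the VizdoomBasic-v0 env is installed and
# registered, e.g. via the `vizdoomgym` package):
#
#   python vizdoom_with_attention_net.py --framework=torch --num-workers=2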