fractional_gpus.py

  1. """Example of a custom gym environment and model. Run this for a demo.
  2. This example shows:
  3. - using a custom environment
  4. - using a custom model
  5. - using Tune for grid search
  6. You can visualize experiment results in ~/ray_results using TensorBoard.
  7. """
import argparse

import ray
from ray import tune
from ray.rllib.examples.env.gpu_requiring_env import GPURequiringEnv
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.test_utils import check_learning_achieved

tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()

parser = argparse.ArgumentParser()
parser.add_argument(
    "--run",
    type=str,
    default="PPO",
    help="The RLlib-registered algorithm to use.")
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "tfe", "torch"],
    default="tf",
    help="The DL framework specifier.")
parser.add_argument("--num-gpus", type=float, default=0.5)
parser.add_argument("--num-workers", type=int, default=1)
parser.add_argument("--num-gpus-per-worker", type=float, default=0.0)
parser.add_argument("--num-envs-per-worker", type=int, default=1)
parser.add_argument(
    "--as-test",
    action="store_true",
    help="Whether this script should be run as a test: --stop-reward must "
    "be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
    "--stop-iters",
    type=int,
    default=50,
    help="Number of iterations to train.")
parser.add_argument(
    "--stop-timesteps",
    type=int,
    default=100000,
    help="Number of timesteps to train.")
parser.add_argument(
    "--stop-reward",
    type=float,
    default=180.0,
    help="Reward at which we stop training.")
if __name__ == "__main__":
    args = parser.parse_args()

    ray.init(num_cpus=4)

    # These configs have been tested on a p2.8xlarge machine (8 GPUs, 16 CPUs),
    # where ray was started using only one of these GPUs:
    # $ ray start --num-gpus=1 --head
    # Note: A strange error could occur when using tf:
    # "NotImplementedError: Cannot convert a symbolic Tensor
    # (default_policy/cond/strided_slice:0) to a numpy array."
    # In rllib/utils/exploration/random.py.
    # Fix: Install numpy version 1.19.5.
    # Tested arg combinations (4 tune trials will be set up; see
    # tune.grid_search over 4 learning rates below):
    # - num_gpus=0.5 (2 tune trials should run in parallel).
    # - num_gpus=0.3 (3 tune trials should run in parallel).
    # - num_gpus=0.25 (4 tune trials should run in parallel).
    # - num_gpus=0.2 + num_gpus_per_worker=0.1 (1 worker) -> 0.3
    #   -> 3 tune trials should run in parallel.
    # - num_gpus=0.2 + num_gpus_per_worker=0.1 (2 workers) -> 0.4
    #   -> 2 tune trials should run in parallel.
    # - num_gpus=0.4 + num_gpus_per_worker=0.1 (2 workers) -> 0.6
    #   -> 1 tune trial should run in parallel.
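    # Added sanity-check sketch (not part of the original example): before
    # launching the trials, you can print what Ray actually sees; both calls
    # below are standard Ray APIs, the printed numbers depend on your setup:
    # >> print(ray.cluster_resources())    # e.g. {"CPU": 4.0, "GPU": 1.0, ...}
    # >> print(ray.available_resources())
    # If "GPU" is missing or 0.0 here, trials that request fractional GPUs
    # cannot be scheduled and will stay pending.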
    config = {
        # Set up the test env as one that requires a GPU, iff
        # num_gpus_per_worker > 0.
        "env": GPURequiringEnv
        if args.num_gpus_per_worker > 0.0 else "CartPole-v0",
        # How many GPUs does the local worker (driver) need? For most algos,
        # this is where the learning updates happen.
        # Set this to > 1 for multi-GPU learning.
        "num_gpus": args.num_gpus,
        # How many RolloutWorkers (each with n environment copies:
        # `num_envs_per_worker`)?
        "num_workers": args.num_workers,
        # How many GPUs does each RolloutWorker (`num_workers`) need?
        "num_gpus_per_worker": args.num_gpus_per_worker,
        # This setting should not really matter as it does not affect the
        # number of GPUs reserved for each worker.
        "num_envs_per_worker": args.num_envs_per_worker,
        # 4 tune trials altogether.
        "lr": tune.grid_search([0.005, 0.003, 0.001, 0.0001]),
        "framework": args.framework,
    }
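    # Added note on per-trial GPU accounting (following the tested
    # combinations listed above): each Tune trial reserves
    #   num_gpus + num_workers * num_gpus_per_worker
    # GPUs in total, so with a single GPU in the cluster roughly
    # floor(1.0 / per_trial_gpus) trials can run concurrently, e.g.
    # num_gpus=0.25 and num_gpus_per_worker=0.0 -> 4 parallel trials.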
    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }
    # Note: The above GPU settings should also work in case you are not
    # running via tune.run(), but instead do:
    # >> from ray.rllib.agents.ppo import PPOTrainer
    # >> trainer = PPOTrainer(config=config)
    # >> for _ in range(10):
    # >>     results = trainer.train()
    # >>     print(results)
    results = tune.run(args.run, config=config, stop=stop)

    if args.as_test:
        check_learning_achieved(results, args.stop_reward)

    ray.shutdown()
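
# Example invocations (an added sketch mirroring the combinations tested above;
# adjust the fractions to the GPUs actually available on your machine):
# $ python fractional_gpus.py --num-gpus=0.25
# $ python fractional_gpus.py --num-gpus=0.2 --num-gpus-per-worker=0.1 --num-workers=1
# $ python fractional_gpus.py --num-gpus=0.4 --num-gpus-per-worker=0.1 --num-workers=2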