# mobilenet_v2_with_lstm.py
  1. # Explains/tests Issues:
  2. # https://github.com/ray-project/ray/issues/6928
  3. # https://github.com/ray-project/ray/issues/6732
  4. import argparse
  5. from gym.spaces import Discrete, Box
  6. import numpy as np
  7. import os
  8. from ray import tune
  9. from ray.rllib.examples.env.random_env import RandomEnv
  10. from ray.rllib.examples.models.mobilenet_v2_with_lstm_models import \
  11. MobileV2PlusRNNModel, TorchMobileV2PlusRNNModel
  12. from ray.rllib.models import ModelCatalog
  13. from ray.rllib.utils.framework import try_import_tf
  14. tf1, tf, tfv = try_import_tf()
  15. cnn_shape = (4, 4, 3)
  16. # The torch version of MobileNetV2 does channels first.
  17. cnn_shape_torch = (3, 224, 224)
  18. parser = argparse.ArgumentParser()
  19. parser.add_argument(
  20. "--framework",
  21. choices=["tf", "tf2", "tfe", "torch"],
  22. default="tf",
  23. help="The DL framework specifier.")
  24. parser.add_argument("--stop-iters", type=int, default=200)
  25. parser.add_argument("--stop-reward", type=float, default=0.0)
  26. parser.add_argument("--stop-timesteps", type=int, default=100000)
  27. if __name__ == "__main__":
  28. args = parser.parse_args()
  29. # Register our custom model.
  30. ModelCatalog.register_custom_model(
  31. "my_model", TorchMobileV2PlusRNNModel
  32. if args.framework == "torch" else MobileV2PlusRNNModel)
  33. stop = {
  34. "training_iteration": args.stop_iters,
  35. "timesteps_total": args.stop_timesteps,
  36. "episode_reward_mean": args.stop_reward,
  37. }
  38. # Configure our Trainer.
  39. config = {
  40. "env": RandomEnv,
  41. "framework": args.framework,
  42. "model": {
  43. "custom_model": "my_model",
  44. # Extra config passed to the custom model's c'tor as kwargs.
  45. "custom_model_config": {
  46. # By default, torch CNNs use "channels-first",
  47. # tf "channels-last".
  48. "cnn_shape": cnn_shape_torch
  49. if args.framework == "torch" else cnn_shape,
  50. },
  51. "max_seq_len": 20,
  52. "vf_share_layers": True,
  53. },
  54. # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
  55. "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
  56. "num_workers": 0, # no parallelism
  57. "env_config": {
  58. "action_space": Discrete(2),
  59. # Test a simple Image observation space.
  60. "observation_space": Box(
  61. 0.0,
  62. 1.0,
  63. shape=cnn_shape_torch
  64. if args.framework == "torch" else cnn_shape,
  65. dtype=np.float32)
  66. },
  67. }
  68. tune.run("PPO", config=config, stop=stop, verbose=1)