td3.py

  1. """A more stable successor to TD3.
  2. By default, this uses a near-identical configuration to that reported in the
  3. TD3 paper.
  4. """
from ray.rllib.agents.ddpg.ddpg import DDPGTrainer, \
    DEFAULT_CONFIG as DDPG_CONFIG
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import TrainerConfigDict

TD3_DEFAULT_CONFIG = DDPGTrainer.merge_trainer_configs(
    DDPG_CONFIG,
    {
        # largest changes: twin Q functions, delayed policy updates, and target
        # smoothing
        "twin_q": True,
        "policy_delay": 2,
        "smooth_target_policy": True,
        "target_noise": 0.2,
        "target_noise_clip": 0.5,
        "exploration_config": {
            # TD3 uses simple Gaussian noise on top of deterministic NN-output
            # actions (after a possible pure random phase of n timesteps).
            "type": "GaussianNoise",
            # For how many timesteps should we return completely random
            # actions, before we start adding (scaled) noise?
            "random_timesteps": 10000,
            # Gaussian stddev of action noise for exploration.
            "stddev": 0.1,
            # Scaling settings by which the Gaussian noise is scaled before
            # being added to the actions. NOTE: The scale timesteps start only
            # after(!) any random steps have been finished.
            # By default, do not anneal over time (fixed 1.0).
            "initial_scale": 1.0,
            "final_scale": 1.0,
            "scale_timesteps": 1
        },

        # other changes & things we want to keep fixed:
        # larger actor learning rate, no l2 regularisation, no Huber loss, etc.
        "learning_starts": 10000,
        "actor_hiddens": [400, 300],
        "critic_hiddens": [400, 300],
        "n_step": 1,
        "gamma": 0.99,
        "actor_lr": 1e-3,
        "critic_lr": 1e-3,
        "l2_reg": 0.0,
        "tau": 5e-3,
        "train_batch_size": 100,
        "use_huber": False,
        "target_network_update_freq": 0,
        "num_workers": 0,
        "num_gpus_per_worker": 0,
        "worker_side_prioritization": False,
        "buffer_size": 1000000,
        "prioritized_replay": False,
        "clip_rewards": False,
        "use_state_preprocessor": False,
    })
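

# TD3Trainer reuses DDPGTrainer's policies and execution plan unchanged; its
# only override is the default config assembled above.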
class TD3Trainer(DDPGTrainer):
    @classmethod
    @override(DDPGTrainer)
    def get_default_config(cls) -> TrainerConfigDict:
        return TD3_DEFAULT_CONFIG
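

# Minimal usage sketch for this trainer, assuming Ray is installed and this
# agents-era RLlib API is available; the environment name and the number of
# training iterations below are illustrative only:
#
#     import ray
#     from ray.rllib.agents.ddpg.td3 import TD3Trainer
#
#     ray.init()
#     trainer = TD3Trainer(config={"env": "Pendulum-v1"})
#     for _ in range(100):
#         result = trainer.train()
#         print(result["episode_reward_mean"])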