halfcheetah-cql.yaml

halfcheetah_cql:
    env:
        grid_search:
            #- ray.rllib.examples.env.d4rl_env.halfcheetah_random
            #- ray.rllib.examples.env.d4rl_env.halfcheetah_medium
            - ray.rllib.examples.env.d4rl_env.halfcheetah_expert
            #- ray.rllib.examples.env.d4rl_env.halfcheetah_medium_replay
    run: CQL
    config:
        # SAC Configs
        #input: d4rl.halfcheetah-random-v0
        #input: d4rl.halfcheetah-medium-v0
        input: d4rl.halfcheetah-expert-v0
        #input: d4rl.halfcheetah-medium-replay-v0
        # Works for both torch and tf.
        framework: tf
        soft_horizon: False
        horizon: 1000
        Q_model:
            fcnet_activation: relu
            fcnet_hiddens: [256, 256, 256]
        policy_model:
            fcnet_activation: relu
            fcnet_hiddens: [256, 256, 256]
        tau: 0.005
        target_entropy: auto
        no_done_at_end: false
        n_step: 3
        rollout_fragment_length: 1
        prioritized_replay: false
        train_batch_size: 256
        target_network_update_freq: 0
        timesteps_per_iteration: 1000
        learning_starts: 256
        optimization:
            actor_learning_rate: 0.0001
            critic_learning_rate: 0.0003
            entropy_learning_rate: 0.0001
        num_workers: 0
        num_gpus: 1
        metrics_smoothing_episodes: 5
        # CQL Configs
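        # Notes on the CQL-specific keys below (descriptive glosses, added
        # here; not part of the original file):
        # min_q_weight: coefficient on the conservative Q regularizer
        # bc_iters: number of initial actor updates done via behavior cloning
        # temperature: temperature of the logsumexp term in the CQL loss
        # num_actions: number of sampled actions used to estimate the logsumexp
        # lagrangian: whether to tune the CQL coefficient via a Lagrangian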
        min_q_weight: 5.0
        bc_iters: 20000
        temperature: 1.0
        num_actions: 10
        lagrangian: False
        evaluation_interval: 3
        evaluation_config:
            input: sampler
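
A tuned-example file like this is normally launched with RLlib's CLI, e.g. "rllib train -f halfcheetah-cql.yaml". The sketch below does the equivalent through the Tune API. It is a minimal sketch, not the canonical runner: it assumes a Ray 1.x install (matching legacy keys above such as timesteps_per_iteration), a working d4rl setup for the offline datasets, and an illustrative file path.

# Minimal sketch: run this tuned example via the Tune API instead of the
# "rllib train" CLI. Assumes Ray 1.x and d4rl installed; the YAML path is
# illustrative.
import yaml

import ray
from ray import tune

with open("halfcheetah-cql.yaml") as f:
    experiments = yaml.safe_load(f)

ray.init()
for name, spec in experiments.items():
    # Fold the env (here a grid_search over D4RL dataset envs) into the
    # trainer config; Tune expands the grid_search dict into one trial
    # per listed env.
    config = dict(spec["config"], env=spec["env"])
    tune.run(
        spec["run"],          # the registered trainer name, "CQL"
        name=name,
        config=config,
        stop=spec.get("stop", {}),
    )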