# hopper-bc.yaml — CQL (behavior-cloning phase) benchmark config for the
# D4RL Hopper offline-RL datasets.
---
  1. hopper_bc:
  2. env:
  3. grid_search:
  4. - ray.rllib.examples.env.d4rl_env.hopper_random
  5. #- ray.rllib.examples.env.d4rl_env.hopper_medium
  6. #- ray.rllib.examples.env.d4rl_env.hopper_expert
  7. #- ray.rllib.examples.env.d4rl_env.hopper_medium_replay
  8. run: CQL
  9. config:
  10. # SAC Configs
  11. input: d4rl.hopper-random-v0
  12. #input: d4rl.hopper-medium-v0
  13. #input: d4rl.hopper-expert-v0
  14. #input: d4rl.hopper-medium-replay-v0
  15. framework: torch
  16. soft_horizon: False
  17. horizon: 1000
  18. Q_model:
  19. fcnet_activation: relu
  20. fcnet_hiddens: [256, 256, 256]
  21. policy_model:
  22. fcnet_activation: relu
  23. fcnet_hiddens: [256, 256, 256]
  24. tau: 0.005
  25. target_entropy: auto
  26. no_done_at_end: false
  27. n_step: 1
  28. rollout_fragment_length: 1
  29. prioritized_replay: false
  30. train_batch_size: 256
  31. target_network_update_freq: 0
  32. timesteps_per_iteration: 1000
  33. learning_starts: 10
  34. optimization:
  35. actor_learning_rate: 0.0001
  36. critic_learning_rate: 0.0003
  37. entropy_learning_rate: 0.0001
  38. num_workers: 0
  39. num_gpus: 1
  40. clip_actions: false
  41. normalize_actions: true
  42. evaluation_interval: 1
  43. metrics_smoothing_episodes: 5
  44. # CQL Configs
  45. min_q_weight: 5.0
  46. bc_iters: 200000000
  47. temperature: 1.0
  48. num_actions: 10
  49. lagrangian: False
  50. evaluation_config:
  51. input: sampler