repeatafterme-ppo-lstm.yaml

repeat-after-me-ppo-w-lstm:
    # Default case: Discrete(2) observations/actions.
    env: ray.rllib.examples.env.repeat_after_me_env.RepeatAfterMeEnv
    run: PPO
    stop:
        episode_reward_mean: 50
        timesteps_total: 100000
    config:
        # Works for both torch and tf.
        framework: tf
        # Make env partially observable.
        env_config:
            config:
                repeat_delay: 2
        gamma: 0.9
        lr: 0.0003
        num_workers: 0
        num_envs_per_worker: 20
        num_sgd_iter: 5
        entropy_coeff: 0.00001
        model:
            use_lstm: true
            lstm_cell_size: 64
            max_seq_len: 20
            fcnet_hiddens: [64]
            vf_share_layers: true
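
# A minimal usage sketch, assuming the `rllib` command-line tool that ships
# with the same Ray version as this example, and that this file is saved
# locally under the name shown above (both assumptions, not stated in the
# file itself). The CLI reads the experiment name, env, stopping criteria,
# and config from this YAML and launches the PPO run via Tune:
#
#   rllib train -f repeatafterme-ppo-lstm.yaml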