interest-evolution-recsim-env-bandit-linucb.yaml
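# Launch hint (an assumption, not part of the checked-in file): tuned-example
# YAMLs like this one are typically run via the RLlib CLI, e.g.
#   rllib train file interest-evolution-recsim-env-bandit-linucb.yaml
# (older Ray releases use the `rllib train -f <path>` form instead).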

interest-evolution-recsim-env-bandit-linucb:
  env: ray.rllib.examples.env.recommender_system_envs_with_recsim.InterestEvolutionRecSimEnv
  run: BanditLinUCB
  stop:
    sampler_results/episode_reward_mean: 180.0
    timesteps_total: 50000
  config:
    framework: torch

    # RLlib/RecSim wrapper specific settings:
    env_config:
      # Env class specified above takes one `config` arg in its c'tor:
      config:
        # Each step, sample `num_candidates` documents using the env-internal
        # document sampler model (a logic that creates n documents to select
        # the slate from).
        resample_documents: true
        num_candidates: 100
        # How many documents to recommend (out of `num_candidates`) each
        # timestep?
        slate_size: 2
        # Should the action space be purely Discrete? Useful for algos that
        # don't support MultiDiscrete (e.g. DQN or Bandits).
        # SlateQ handles MultiDiscrete action spaces.
        convert_to_discrete_action_space: true
        wrap_for_bandits: true

    seed: 0
    metrics_num_episodes_for_smoothing: 500
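
# Rough programmatic equivalent (a sketch under the assumption of a recent
# Ray 2.x install; not taken from this file) using RLlib's Python config API,
# mirroring the settings above:
#
#   from ray.rllib.algorithms.bandit import BanditLinUCBConfig
#   from ray.rllib.examples.env.recommender_system_envs_with_recsim import (
#       InterestEvolutionRecSimEnv,
#   )
#
#   config = (
#       BanditLinUCBConfig()
#       .environment(
#           InterestEvolutionRecSimEnv,
#           env_config={
#               "config": {
#                   "resample_documents": True,
#                   "num_candidates": 100,
#                   "slate_size": 2,
#                   "convert_to_discrete_action_space": True,
#                   "wrap_for_bandits": True,
#               }
#           },
#       )
#       .framework("torch")
#       .debugging(seed=0)
#       .reporting(metrics_num_episodes_for_smoothing=500)
#   )
#   algo = config.build()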