import logging
from typing import Type

from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.dqn.simple_q import SimpleQTrainer
from ray.rllib.agents.ddpg.ddpg_tf_policy import DDPGTFPolicy
from ray.rllib.policy.policy import Policy
from ray.rllib.utils.annotations import override
from ray.rllib.utils.deprecation import DEPRECATED_VALUE
from ray.rllib.utils.typing import TrainerConfigDict

logger = logging.getLogger(__name__)

# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # === Twin Delayed DDPG (TD3) and Soft Actor-Critic (SAC) tricks ===
    # TD3: https://spinningup.openai.com/en/latest/algorithms/td3.html
    # In addition to the settings below, you can use "exploration_noise_type"
    # and "exploration_gauss_act_noise" to get IID Gaussian exploration noise
    # instead of OU exploration noise.
    # (See the TD3-style override example just below this config dict.)
    # Use twin Q-networks.
    "twin_q": False,
    # Delayed policy updates (update the policy only every `policy_delay`
    # critic updates).
    "policy_delay": 1,
    # Target policy smoothing (this also replaces OU exploration noise with
    # IID Gaussian exploration noise, for now).
    "smooth_target_policy": False,
    # Gaussian stddev of the target action noise used for smoothing.
    "target_noise": 0.2,
    # Target noise limit (bound).
    "target_noise_clip": 0.5,

    # === Evaluation ===
    # Evaluate with epsilon=0 every `evaluation_interval` training iterations.
    # The evaluation stats will be reported under the "evaluation" metric key.
    # Note that evaluation is currently not parallelized, and that for Ape-X
    # metrics are already only reported for the lowest-epsilon workers.
    "evaluation_interval": None,
    # Number of episodes to run per evaluation period.
    "evaluation_duration": 10,

    # === Model ===
    # Apply a state preprocessor with spec given by the "model" config option
    # (like other RL algorithms). This is mostly useful if you have a weird
    # observation shape, like an image. Disabled by default.
    "use_state_preprocessor": False,
    # Postprocess the policy network model output with these hidden layers. If
    # use_state_preprocessor is False, then these will be the *only* hidden
    # layers in the network.
    "actor_hiddens": [400, 300],
    # Hidden-layer activation of the postprocessing stage of the policy
    # network.
    "actor_hidden_activation": "relu",
    # Postprocess the critic network model output with these hidden layers;
    # again, if use_state_preprocessor is True, then the state will be
    # preprocessed by the model specified with the "model" config option first.
    "critic_hiddens": [400, 300],
    # Hidden-layer activation of the postprocessing stage of the critic.
    "critic_hidden_activation": "relu",
    # N-step Q learning.
    "n_step": 1,

    # === Exploration ===
    "exploration_config": {
        # DDPG uses OrnsteinUhlenbeck (stateful) noise, which is added to the
        # NN-output actions (after a possible pure-random phase of n
        # timesteps).
        "type": "OrnsteinUhlenbeckNoise",
        # For how many timesteps should we return completely random actions,
        # before we start adding (scaled) noise?
        "random_timesteps": 1000,
        # The OU base scaling factor to always apply to action-added noise.
        "ou_base_scale": 0.1,
        # The OU theta param.
        "ou_theta": 0.15,
        # The OU sigma param.
        "ou_sigma": 0.2,
        # The initial noise scaling factor.
        "initial_scale": 1.0,
        # The final noise scaling factor.
        "final_scale": 0.02,
        # Timesteps over which to anneal scale (from initial to final values).
        "scale_timesteps": 10000,
    },
    # Number of env steps to optimize for before returning.
    "timesteps_per_iteration": 1000,
    # Extra configuration that disables exploration.
    "evaluation_config": {
        "explore": False
    },

    # === Replay buffer ===
    # Size of the replay buffer. Note that if async_updates is set, then
    # each worker will have a replay buffer of this size.
    "buffer_size": DEPRECATED_VALUE,
    "replay_buffer_config": {
        "type": "MultiAgentReplayBuffer",
        "capacity": 50000,
    },
    # Set this to True, if you want the contents of your buffer(s) to be
    # stored in any saved checkpoints as well.
    # Warnings will be created if:
    # - This is True AND restoring from a checkpoint that contains no buffer
    #   data.
    # - This is False AND restoring from a checkpoint that does contain
    #   buffer data.
    "store_buffer_in_checkpoints": False,
    # If True, a prioritized replay buffer will be used.
    "prioritized_replay": True,
    # Alpha parameter for the prioritized replay buffer.
    "prioritized_replay_alpha": 0.6,
    # Beta parameter for sampling from the prioritized replay buffer.
    "prioritized_replay_beta": 0.4,
    # Time steps over which the beta parameter is annealed.
    "prioritized_replay_beta_annealing_timesteps": 20000,
    # Final value of beta.
    "final_prioritized_replay_beta": 0.4,
    # Epsilon to add to the TD errors when updating priorities.
    "prioritized_replay_eps": 1e-6,
    # Whether to LZ4-compress observations.
    "compress_observations": False,

    # The intensity with which to update the model (vs collecting samples from
    # the env). If None, uses the "natural" value of:
    # `train_batch_size` / (`rollout_fragment_length` x `num_workers` x
    # `num_envs_per_worker`).
    # If provided, will make sure that the ratio between ts inserted into and
    # sampled from the buffer matches the given value.
    # Example:
    #   training_intensity=1000.0
    #   train_batch_size=250 rollout_fragment_length=1
    #   num_workers=1 (or 0) num_envs_per_worker=1
    #   -> natural value = 250 / 1 = 250.0
    #   -> will make sure that replay+train op will be executed 4x as
    #      often as rollout+insert op (4 * 250 = 1000).
    # See: rllib/agents/dqn/dqn.py::calculate_rr_weights for further details.
    "training_intensity": None,

    # === Optimization ===
    # Learning rate for the critic (Q-function) optimizer.
    "critic_lr": 1e-3,
    # Learning rate for the actor (policy) optimizer.
    "actor_lr": 1e-3,
    # Update the target network every `target_network_update_freq` steps.
    "target_network_update_freq": 0,
    # Update the target by \tau * policy + (1 - \tau) * target_policy.
    "tau": 0.002,
    # If True, use Huber loss instead of squared loss for the critic network.
    # Conventionally, there is no need to clip gradients when using a Huber
    # loss.
    "use_huber": False,
    # Threshold of the Huber loss.
    "huber_threshold": 1.0,
    # Weight for L2 regularization.
    "l2_reg": 1e-6,
    # If not None, clip gradients during optimization at this value.
    "grad_clip": None,
    # How many steps of the model to sample before learning starts.
    "learning_starts": 1500,
    # Update the replay buffer with this many samples at once. Note that this
    # setting applies per-worker if num_workers > 1.
    "rollout_fragment_length": 1,
    # Size of a batch sampled from the replay buffer for training. Note that
    # if async_updates is set, then each worker returns gradients for a
    # batch of this size.
    "train_batch_size": 256,

    # === Parallelism ===
    # Number of workers used for collecting samples. This only makes sense
    # to increase if your environment is particularly slow to sample, or if
    # you're using the Async or Ape-X optimizers.
    "num_workers": 0,
    # Whether to compute priorities on workers.
    "worker_side_prioritization": False,
    # Prevent reporting frequency from going lower than this time span.
    "min_time_s_per_reporting": 1,
})
# __sphinx_doc_end__
# yapf: enable
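
# Illustrative only (not part of the original defaults): a TD3-style setup
# would typically enable the tricks above on top of DEFAULT_CONFIG, e.g.:
#
#   td3_style_config = DEFAULT_CONFIG.copy()
#   td3_style_config.update({
#       "twin_q": True,
#       "policy_delay": 2,
#       "smooth_target_policy": True,
#       "target_noise": 0.2,
#       "target_noise_clip": 0.5,
#   })
#
# The exact values here are assumptions for illustration; RLlib also ships a
# dedicated TD3 trainer with its own tuned defaults.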


class DDPGTrainer(SimpleQTrainer):
    @classmethod
    @override(SimpleQTrainer)
    def get_default_config(cls) -> TrainerConfigDict:
        return DEFAULT_CONFIG

    @override(SimpleQTrainer)
    def get_default_policy_class(self,
                                 config: TrainerConfigDict) -> Type[Policy]:
        if config["framework"] == "torch":
            from ray.rllib.agents.ddpg.ddpg_torch_policy import \
                DDPGTorchPolicy
            return DDPGTorchPolicy
        else:
            return DDPGTFPolicy

    @override(SimpleQTrainer)
    def validate_config(self, config: TrainerConfigDict) -> None:
        # Call super's validation method.
        super().validate_config(config)

        if config["model"]["custom_model"]:
            logger.warning(
                "Setting use_state_preprocessor=True since a custom model "
                "was specified.")
            config["use_state_preprocessor"] = True
- if config["grad_clip"] is not None and config["grad_clip"] <= 0.0:
- raise ValueError("`grad_clip` value must be > 0.0!")
- if config["exploration_config"]["type"] == "ParameterNoise":
- if config["batch_mode"] != "complete_episodes":
- logger.warning(
- "ParameterNoise Exploration requires `batch_mode` to be "
- "'complete_episodes'. Setting "
- "batch_mode=complete_episodes.")
- config["batch_mode"] = "complete_episodes"
- if config.get("prioritized_replay"):
- if config["multiagent"]["replay_mode"] == "lockstep":
- raise ValueError("Prioritized replay is not supported when "
- "replay_mode=lockstep.")
- else:
- if config.get("worker_side_prioritization"):
- raise ValueError(
- "Worker side prioritization is not supported when "
- "prioritized_replay=False.")