r2d2_tf_policy.py

  1. """TensorFlow policy class used for R2D2."""
  2. from typing import Dict, List, Optional, Tuple
  3. import gymnasium as gym
  4. import ray
  5. from ray.rllib.algorithms.dqn.dqn_tf_policy import (
  6. clip_gradients,
  7. compute_q_values,
  8. PRIO_WEIGHTS,
  9. postprocess_nstep_and_prio,
  10. )
  11. from ray.rllib.algorithms.dqn.dqn_tf_policy import build_q_model
  12. from ray.rllib.models.action_dist import ActionDistribution
  13. from ray.rllib.models.modelv2 import ModelV2
  14. from ray.rllib.models.tf.tf_action_dist import Categorical
  15. from ray.rllib.models.torch.torch_action_dist import TorchCategorical
  16. from ray.rllib.policy.policy import Policy
  17. from ray.rllib.policy.tf_policy_template import build_tf_policy
  18. from ray.rllib.policy.sample_batch import SampleBatch
  19. from ray.rllib.policy.tf_mixins import LearningRateSchedule, TargetNetworkMixin
  20. from ray.rllib.utils.framework import try_import_tf
  21. from ray.rllib.utils.tf_utils import huber_loss
  22. from ray.rllib.utils.typing import ModelInputDict, TensorType, AlgorithmConfigDict
  23. tf1, tf, tfv = try_import_tf()


def build_r2d2_model(
    policy: Policy,
    obs_space: gym.spaces.Space,
    action_space: gym.spaces.Space,
    config: AlgorithmConfigDict,
) -> ModelV2:
    """Builds the Q-model (and target Q-model) for R2D2.

    Args:
        policy: The policy, which will use the model for optimization.
        obs_space: The policy's observation space.
        action_space: The policy's action space.
        config: The algorithm's config dict.

    Returns:
        The Q-model. Note: The target Q-model is not returned; it is only
        assigned to `policy.target_model`.
    """
    # Create the policy's models.
    model = build_q_model(policy, obs_space, action_space, config)

    # Assert correct model type by checking the init state to be present.
    # For attention nets: These don't necessarily publish their init state via
    # Model.get_initial_state, but may only use the trajectory view API
    # (view_requirements).
    assert (
        model.get_initial_state() != []
        or model.view_requirements.get("state_in_0") is not None
    ), (
        "R2D2 requires its model to be a recurrent one! Try using "
        "`model.use_lstm` or `model.use_attention` in your config "
        "to auto-wrap your model with an LSTM- or attention net."
    )

    return model
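

# Config note (illustrative, not part of the original module): the recurrence
# assertion above is satisfied, e.g., by letting RLlib auto-wrap the default
# model with an LSTM or attention net via the model config:
#
#   "model": {"use_lstm": True, "lstm_cell_size": 64}
#   # or
#   "model": {"use_attention": True}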


def r2d2_loss(policy: Policy, model, _, train_batch: SampleBatch) -> TensorType:
    """Constructs the loss for R2D2TFPolicy.

    Args:
        policy: The Policy to calculate the loss for.
        model (ModelV2): The Model to calculate the loss for.
        train_batch: The training data.

    Returns:
        TensorType: A single loss tensor.
    """
    config = policy.config

    # Construct internal state inputs.
    i = 0
    state_batches = []
    while "state_in_{}".format(i) in train_batch:
        state_batches.append(train_batch["state_in_{}".format(i)])
        i += 1
    assert state_batches

    # Q-network evaluation (at t).
    q, _, _, _ = compute_q_values(
        policy,
        model,
        train_batch,
        state_batches=state_batches,
        seq_lens=train_batch.get(SampleBatch.SEQ_LENS),
        explore=False,
        is_training=True,
    )

    # Target Q-network evaluation (at t+1).
    q_target, _, _, _ = compute_q_values(
        policy,
        policy.target_model,
        train_batch,
        state_batches=state_batches,
        seq_lens=train_batch.get(SampleBatch.SEQ_LENS),
        explore=False,
        is_training=True,
    )
    if not hasattr(policy, "target_q_func_vars"):
        policy.target_q_func_vars = policy.target_model.variables()

    actions = tf.cast(train_batch[SampleBatch.ACTIONS], tf.int64)
    dones = tf.cast(train_batch[SampleBatch.TERMINATEDS], tf.float32)
    rewards = train_batch[SampleBatch.REWARDS]
    weights = tf.cast(train_batch[PRIO_WEIGHTS], tf.float32)

    B = tf.shape(state_batches[0])[0]
    T = tf.shape(q)[0] // B

    # Q scores for actions which we know were selected in the given state.
    one_hot_selection = tf.one_hot(actions, policy.action_space.n)
    q_selected = tf.reduce_sum(
        tf.where(q > tf.float32.min, q, tf.zeros_like(q)) * one_hot_selection, axis=1
    )

    if config["double_q"]:
        best_actions = tf.argmax(q, axis=1)
    else:
        best_actions = tf.argmax(q_target, axis=1)

    best_actions_one_hot = tf.one_hot(best_actions, policy.action_space.n)
    q_target_best = tf.reduce_sum(
        tf.where(q_target > tf.float32.min, q_target, tf.zeros_like(q_target))
        * best_actions_one_hot,
        axis=1,
    )

    if config["num_atoms"] > 1:
        raise ValueError("Distributional R2D2 not supported yet!")
    else:
        q_target_best_masked_tp1 = (1.0 - dones) * tf.concat(
            [q_target_best[1:], tf.constant([0.0])], axis=0
        )

        if config["use_h_function"]:
            h_inv = h_inverse(q_target_best_masked_tp1, config["h_function_epsilon"])
            target = h_function(
                rewards + config["gamma"] ** config["n_step"] * h_inv,
                config["h_function_epsilon"],
            )
        else:
            target = (
                rewards
                + config["gamma"] ** config["n_step"] * q_target_best_masked_tp1
            )

        # Seq-mask all loss-related terms.
        seq_mask = tf.sequence_mask(train_batch[SampleBatch.SEQ_LENS], T)[:, :-1]
        # Mask away also the burn-in sequence at the beginning.
        burn_in = policy.config["replay_buffer_config"]["replay_burn_in"]
        # Make sure this works for both static graph and eager mode.
        if burn_in > 0:
            seq_mask = tf.cond(
                pred=tf.convert_to_tensor(burn_in, tf.int32) < T,
                true_fn=lambda: tf.concat(
                    [tf.fill([B, burn_in], False), seq_mask[:, burn_in:]], 1
                ),
                false_fn=lambda: seq_mask,
            )

        def reduce_mean_valid(t):
            return tf.reduce_mean(tf.boolean_mask(t, seq_mask))

        # Make sure to use the correct time indices:
        # Q(t) - [gamma * r + Q^(t+1)]
        q_selected = tf.reshape(q_selected, [B, T])[:, :-1]
        td_error = q_selected - tf.stop_gradient(tf.reshape(target, [B, T])[:, :-1])
        td_error = td_error * tf.cast(seq_mask, tf.float32)
        weights = tf.reshape(weights, [B, T])[:, :-1]
        policy._total_loss = reduce_mean_valid(weights * huber_loss(td_error))
        # Store the TD-error per time chunk (b/c we need only one mean
        # prioritized replay weight per stored sequence).
        policy._td_error = tf.reduce_mean(td_error, axis=-1)
        policy._loss_stats = {
            "mean_q": reduce_mean_valid(q_selected),
            "min_q": tf.reduce_min(q_selected),
            "max_q": tf.reduce_max(q_selected),
            "mean_td_error": reduce_mean_valid(td_error),
        }

    return policy._total_loss
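

# Descriptive note (restating the loss above, not new behavior): with
# `double_q` and `use_h_function` enabled, the per-timestep TD target is
#
#   y_t = h( r_t + gamma^n_step * h^-1( Q_target(s_{t+1}, argmax_a Q(s_{t+1}, a)) ) )
#
# with the bootstrap term zeroed at terminal steps; h and h^-1 are defined
# below. The Huber loss is then taken on Q(s_t, a_t) - y_t over valid,
# non-burn-in timesteps of each stored sequence, weighted by the
# prioritized-replay importance weights.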


def h_function(x, epsilon=1.0):
    """h-function to normalize target Qs, described in the paper [1].

    h(x) = sign(x) * [sqrt(abs(x) + 1) - 1] + epsilon * x

    Used in [1] in combination with h_inverse:
      targets = h(r + gamma * h_inverse(Q^))

    [1] Recurrent Experience Replay in Distributed Reinforcement Learning
        (R2D2) - Kapturowski et al. - ICLR 2019.
    """
    return tf.sign(x) * (tf.sqrt(tf.abs(x) + 1.0) - 1.0) + epsilon * x


def h_inverse(x, epsilon=1.0):
    """Inverse of the above h-function, described in the paper [1].

    If x > 0.0:
    h-1(x) = [2eps * x + (2eps + 1) - sqrt(4eps x + (2eps + 1)^2)] /
        (2 * eps^2)

    If x < 0.0:
    h-1(x) = [2eps * x - (2eps + 1) + sqrt(-4eps x + (2eps + 1)^2)] /
        (2 * eps^2)
    """
    two_epsilon = epsilon * 2
    if_x_pos = (
        two_epsilon * x
        + (two_epsilon + 1.0)
        - tf.sqrt(4.0 * epsilon * x + (two_epsilon + 1.0) ** 2)
    ) / (2.0 * epsilon**2)
    if_x_neg = (
        two_epsilon * x
        - (two_epsilon + 1.0)
        + tf.sqrt(-4.0 * epsilon * x + (two_epsilon + 1.0) ** 2)
    ) / (2.0 * epsilon**2)
    return tf.where(x < 0.0, if_x_neg, if_x_pos)
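

# Sanity sketch (illustrative, not part of the original module): for any
# epsilon > 0, h_function and h_inverse are meant to be approximate mutual
# inverses, which is what the target rescaling in `r2d2_loss` relies on:
#
#   x = tf.constant([-10.0, -1.0, 0.0, 1.0, 10.0], dtype=tf.float64)
#   y = h_inverse(h_function(x, epsilon=1e-2), epsilon=1e-2)
#   # y should be numerically close to x.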


class ComputeTDErrorMixin:
    """Assign the `compute_td_error` method to the R2D2TFPolicy.

    This allows us to prioritize on the worker side.
    """

    def __init__(self):
        def compute_td_error(
            obs_t, act_t, rew_t, obs_tp1, terminateds_mask, importance_weights
        ):
            input_dict = self._lazy_tensor_dict({SampleBatch.CUR_OBS: obs_t})
            input_dict[SampleBatch.ACTIONS] = act_t
            input_dict[SampleBatch.REWARDS] = rew_t
            input_dict[SampleBatch.NEXT_OBS] = obs_tp1
            input_dict[SampleBatch.TERMINATEDS] = terminateds_mask
            input_dict[PRIO_WEIGHTS] = importance_weights

            # Do forward pass on loss to update the TD-error attribute.
            r2d2_loss(self, self.model, None, input_dict)

            return self._td_error

        self.compute_td_error = compute_td_error
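

# Note (descriptive, not new behavior): `ComputeTDErrorMixin.__init__` is
# invoked from `before_loss_init` further below, so the built policy exposes
# `policy.compute_td_error(...)`, which re-runs `r2d2_loss` on the given batch
# pieces and returns the per-sequence mean TD-error from `policy._td_error`
# for worker-side prioritized-replay updates.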


def get_distribution_inputs_and_class(
    policy: Policy,
    model: ModelV2,
    *,
    input_dict: ModelInputDict,
    state_batches: Optional[List[TensorType]] = None,
    seq_lens: Optional[TensorType] = None,
    explore: bool = True,
    is_training: bool = False,
    **kwargs
) -> Tuple[TensorType, type, List[TensorType]]:
    """Computes Q-values and returns them with the action dist class and state-outs."""
    if policy.config["framework"] == "torch":
        from ray.rllib.algorithms.r2d2.r2d2_torch_policy import (
            compute_q_values as torch_compute_q_values,
        )

        func = torch_compute_q_values
    else:
        func = compute_q_values

    q_vals, logits, probs_or_logits, state_out = func(
        policy, model, input_dict, state_batches, seq_lens, explore, is_training
    )

    policy.q_values = q_vals
    if not hasattr(policy, "q_func_vars"):
        policy.q_func_vars = model.variables()

    action_dist_class = (
        TorchCategorical if policy.config["framework"] == "torch" else Categorical
    )

    return policy.q_values, action_dist_class, state_out


def adam_optimizer(
    policy: Policy, config: AlgorithmConfigDict
) -> "tf.keras.optimizers.Optimizer":
    return tf1.train.AdamOptimizer(
        learning_rate=policy.cur_lr, epsilon=config["adam_epsilon"]
    )


def build_q_stats(policy: Policy, batch) -> Dict[str, TensorType]:
    return dict(
        {
            "cur_lr": policy.cur_lr,
        },
        **policy._loss_stats
    )


def setup_early_mixins(
    policy: Policy, obs_space, action_space, config: AlgorithmConfigDict
) -> None:
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])


def before_loss_init(
    policy: Policy,
    obs_space: gym.spaces.Space,
    action_space: gym.spaces.Space,
    config: AlgorithmConfigDict,
) -> None:
    ComputeTDErrorMixin.__init__(policy)


def setup_late_mixins(
    policy: Policy,
    obs_space: gym.spaces.Space,
    action_space: gym.spaces.Space,
    config: AlgorithmConfigDict,
) -> None:
    TargetNetworkMixin.__init__(policy)


R2D2TFPolicy = build_tf_policy(
    name="R2D2TFPolicy",
    loss_fn=r2d2_loss,
    get_default_config=lambda: ray.rllib.algorithms.r2d2.r2d2.R2D2Config(),
    postprocess_fn=postprocess_nstep_and_prio,
    stats_fn=build_q_stats,
    make_model=build_r2d2_model,
    action_distribution_fn=get_distribution_inputs_and_class,
    optimizer_fn=adam_optimizer,
    extra_action_out_fn=lambda policy: {"q_values": policy.q_values},
    compute_gradients_fn=clip_gradients,
    extra_learn_fetches_fn=lambda policy: {"td_error": policy._td_error},
    before_init=setup_early_mixins,
    before_loss_init=before_loss_init,
    after_init=setup_late_mixins,
    mixins=[
        TargetNetworkMixin,
        ComputeTDErrorMixin,
        LearningRateSchedule,
    ],
)
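

# Usage sketch (illustrative assumption about this RLlib version's APIs):
# R2D2TFPolicy is normally created by the R2D2 algorithm rather than directly:
#
#   from ray.rllib.algorithms.r2d2 import R2D2Config
#
#   config = (
#       R2D2Config()
#       .environment("CartPole-v1")
#       .framework("tf")
#       .training(model={"use_lstm": True})
#   )
#   algo = config.build()
#   print(type(algo.get_policy()))  # R2D2TFPolicy when framework="tf"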