policy.py 76 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
66116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877
  1. import json
  2. import logging
  3. import os
  4. import platform
  5. from abc import ABCMeta, abstractmethod
  6. from typing import (
  7. TYPE_CHECKING,
  8. Any,
  9. Callable,
  10. Container,
  11. Dict,
  12. List,
  13. Mapping,
  14. Optional,
  15. Tuple,
  16. Type,
  17. Union,
  18. )
  19. import gymnasium as gym
  20. import numpy as np
  21. import tree # pip install dm_tree
  22. from gymnasium.spaces import Box
  23. from packaging import version
  24. import ray
  25. import ray.cloudpickle as pickle
  26. from ray.actor import ActorHandle
  27. from ray.train import Checkpoint
  28. from ray.rllib.core.models.base import STATE_IN, STATE_OUT
  29. from ray.rllib.models.action_dist import ActionDistribution
  30. from ray.rllib.models.catalog import ModelCatalog
  31. from ray.rllib.models.modelv2 import ModelV2
  32. from ray.rllib.policy.rnn_sequencing import add_time_dimension
  33. from ray.rllib.policy.sample_batch import SampleBatch
  34. from ray.rllib.policy.view_requirement import ViewRequirement
  35. from ray.rllib.utils.annotations import (
  36. DeveloperAPI,
  37. ExperimentalAPI,
  38. OverrideToImplementCustomLogic,
  39. OverrideToImplementCustomLogic_CallToSuperRecommended,
  40. is_overridden,
  41. )
  42. from ray.rllib.utils.checkpoints import (
  43. CHECKPOINT_VERSION,
  44. get_checkpoint_info,
  45. try_import_msgpack,
  46. )
  47. from ray.rllib.utils.deprecation import (
  48. Deprecated,
  49. DEPRECATED_VALUE,
  50. deprecation_warning,
  51. )
  52. from ray.rllib.utils.exploration.exploration import Exploration
  53. from ray.rllib.utils.framework import try_import_tf, try_import_torch
  54. from ray.rllib.utils.from_config import from_config
  55. from ray.rllib.utils.numpy import convert_to_numpy
  56. from ray.rllib.utils.serialization import (
  57. deserialize_type,
  58. space_from_dict,
  59. space_to_dict,
  60. )
  61. from ray.rllib.utils.spaces.space_utils import (
  62. get_base_struct_from_space,
  63. get_dummy_batch_for_space,
  64. unbatch,
  65. )
  66. from ray.rllib.utils.tensor_dtype import get_np_dtype
  67. from ray.rllib.utils.tf_utils import get_tf_eager_cls_if_necessary
  68. from ray.rllib.utils.typing import (
  69. AgentID,
  70. AlgorithmConfigDict,
  71. ModelGradients,
  72. ModelWeights,
  73. PolicyID,
  74. PolicyState,
  75. T,
  76. TensorStructType,
  77. TensorType,
  78. )
  79. from ray.util.annotations import PublicAPI
  80. tf1, tf, tfv = try_import_tf()
  81. torch, _ = try_import_torch()
  82. if TYPE_CHECKING:
  83. from ray.rllib.evaluation import Episode
  84. from ray.rllib.core.rl_module import RLModule
  85. logger = logging.getLogger(__name__)
  86. @PublicAPI
  87. class PolicySpec:
  88. """A policy spec used in the "config.multiagent.policies" specification dict.
  89. As values (keys are the policy IDs (str)). E.g.:
  90. config:
  91. multiagent:
  92. policies: {
  93. "pol1": PolicySpec(None, Box, Discrete(2), {"lr": 0.0001}),
  94. "pol2": PolicySpec(config={"lr": 0.001}),
  95. }
  96. """
  97. def __init__(
  98. self, policy_class=None, observation_space=None, action_space=None, config=None
  99. ):
  100. # If None, use the Algorithm's default policy class stored under
  101. # `Algorithm._policy_class`.
  102. self.policy_class = policy_class
  103. # If None, use the env's observation space. If None and there is no Env
  104. # (e.g. offline RL), an error is thrown.
  105. self.observation_space = observation_space
  106. # If None, use the env's action space. If None and there is no Env
  107. # (e.g. offline RL), an error is thrown.
  108. self.action_space = action_space
  109. # Overrides defined keys in the main Algorithm config.
  110. # If None, use {}.
  111. self.config = config
  112. def __eq__(self, other: "PolicySpec"):
  113. return (
  114. self.policy_class == other.policy_class
  115. and self.observation_space == other.observation_space
  116. and self.action_space == other.action_space
  117. and self.config == other.config
  118. )
  119. def serialize(self) -> Dict:
  120. from ray.rllib.algorithms.registry import get_policy_class_name
  121. # Try to figure out a durable name for this policy.
  122. cls = get_policy_class_name(self.policy_class)
  123. if cls is None:
  124. logger.warning(
  125. f"Can not figure out a durable policy name for {self.policy_class}. "
  126. f"You are probably trying to checkpoint a custom policy. "
  127. f"Raw policy class may cause problems when the checkpoint needs to "
  128. "be loaded in the future. To fix this, make sure you add your "
  129. "custom policy in rllib.algorithms.registry.POLICIES."
  130. )
  131. cls = self.policy_class
  132. return {
  133. "policy_class": cls,
  134. "observation_space": space_to_dict(self.observation_space),
  135. "action_space": space_to_dict(self.action_space),
  136. # TODO(jungong) : try making the config dict durable by maybe
  137. # getting rid of all the fields that are not JSON serializable.
  138. "config": self.config,
  139. }
  140. @classmethod
  141. def deserialize(cls, spec: Dict) -> "PolicySpec":
  142. if isinstance(spec["policy_class"], str):
  143. # Try to recover the actual policy class from durable name.
  144. from ray.rllib.algorithms.registry import get_policy_class
  145. policy_class = get_policy_class(spec["policy_class"])
  146. elif isinstance(spec["policy_class"], type):
  147. # Policy spec is already a class type. Simply use it.
  148. policy_class = spec["policy_class"]
  149. else:
  150. raise AttributeError(f"Unknown policy class spec {spec['policy_class']}")
  151. return cls(
  152. policy_class=policy_class,
  153. observation_space=space_from_dict(spec["observation_space"]),
  154. action_space=space_from_dict(spec["action_space"]),
  155. config=spec["config"],
  156. )
  157. @DeveloperAPI
  158. class Policy(metaclass=ABCMeta):
  159. """RLlib's base class for all Policy implementations.
  160. Policy is the abstract superclass for all DL-framework specific sub-classes
  161. (e.g. TFPolicy or TorchPolicy). It exposes APIs to
  162. 1. Compute actions from observation (and possibly other) inputs.
  163. 2. Manage the Policy's NN model(s), like exporting and loading their weights.
  164. 3. Postprocess a given trajectory from the environment or other input via the
  165. `postprocess_trajectory` method.
  166. 4. Compute losses from a train batch.
  167. 5. Perform updates from a train batch on the NN-models (this normally includes loss
  168. calculations) either:
  169. a. in one monolithic step (`learn_on_batch`)
  170. b. via batch pre-loading, then n steps of actual loss computations and updates
  171. (`load_batch_into_buffer` + `learn_on_loaded_batch`).
  172. """
  173. @DeveloperAPI
  174. def __init__(
  175. self,
  176. observation_space: gym.Space,
  177. action_space: gym.Space,
  178. config: AlgorithmConfigDict,
  179. ):
  180. """Initializes a Policy instance.
  181. Args:
  182. observation_space: Observation space of the policy.
  183. action_space: Action space of the policy.
  184. config: A complete Algorithm/Policy config dict. For the default
  185. config keys and values, see rllib/algorithm/algorithm.py.
  186. """
  187. self.observation_space: gym.Space = observation_space
  188. self.action_space: gym.Space = action_space
  189. # the policy id in the global context.
  190. self.__policy_id = config.get("__policy_id")
  191. # The base struct of the observation/action spaces.
  192. # E.g. action-space = gym.spaces.Dict({"a": Discrete(2)}) ->
  193. # action_space_struct = {"a": Discrete(2)}
  194. self.observation_space_struct = get_base_struct_from_space(observation_space)
  195. self.action_space_struct = get_base_struct_from_space(action_space)
  196. self.config: AlgorithmConfigDict = config
  197. self.framework = self.config.get("framework")
  198. # Create the callbacks object to use for handling custom callbacks.
  199. from ray.rllib.algorithms.callbacks import DefaultCallbacks
  200. callbacks = self.config.get("callbacks")
  201. if isinstance(callbacks, DefaultCallbacks):
  202. self.callbacks = callbacks()
  203. elif isinstance(callbacks, (str, type)):
  204. try:
  205. self.callbacks: "DefaultCallbacks" = deserialize_type(
  206. self.config.get("callbacks")
  207. )()
  208. except Exception:
  209. pass # TEST
  210. else:
  211. self.callbacks: "DefaultCallbacks" = DefaultCallbacks()
  212. # The global timestep, broadcast down from time to time from the
  213. # local worker to all remote workers.
  214. self.global_timestep: int = 0
  215. # The number of gradient updates this policy has undergone.
  216. self.num_grad_updates: int = 0
  217. # The action distribution class to use for action sampling, if any.
  218. # Child classes may set this.
  219. self.dist_class: Optional[Type] = None
  220. # Initialize view requirements.
  221. self.init_view_requirements()
  222. # Whether the Model's initial state (method) has been added
  223. # automatically based on the given view requirements of the model.
  224. self._model_init_state_automatically_added = False
  225. # Connectors.
  226. self.agent_connectors = None
  227. self.action_connectors = None
  228. @staticmethod
  229. def from_checkpoint(
  230. checkpoint: Union[str, Checkpoint],
  231. policy_ids: Optional[Container[PolicyID]] = None,
  232. ) -> Union["Policy", Dict[PolicyID, "Policy"]]:
  233. """Creates new Policy instance(s) from a given Policy or Algorithm checkpoint.
  234. Note: This method must remain backward compatible from 2.1.0 on, wrt.
  235. checkpoints created with Ray 2.0.0 or later.
  236. Args:
  237. checkpoint: The path (str) to a Policy or Algorithm checkpoint directory
  238. or an AIR Checkpoint (Policy or Algorithm) instance to restore
  239. from.
  240. If checkpoint is a Policy checkpoint, `policy_ids` must be None
  241. and only the Policy in that checkpoint is restored and returned.
  242. If checkpoint is an Algorithm checkpoint and `policy_ids` is None,
  243. will return a list of all Policy objects found in
  244. the checkpoint, otherwise a list of those policies in `policy_ids`.
  245. policy_ids: List of policy IDs to extract from a given Algorithm checkpoint.
  246. If None and an Algorithm checkpoint is provided, will restore all
  247. policies found in that checkpoint. If a Policy checkpoint is given,
  248. this arg must be None.
  249. Returns:
  250. An instantiated Policy, if `checkpoint` is a Policy checkpoint. A dict
  251. mapping PolicyID to Policies, if `checkpoint` is an Algorithm checkpoint.
  252. In the latter case, returns all policies within the Algorithm if
  253. `policy_ids` is None, else a dict of only those Policies that are in
  254. `policy_ids`.
  255. """
  256. checkpoint_info = get_checkpoint_info(checkpoint)
  257. # Algorithm checkpoint: Extract one or more policies from it and return them
  258. # in a dict (mapping PolicyID to Policy instances).
  259. if checkpoint_info["type"] == "Algorithm":
  260. from ray.rllib.algorithms.algorithm import Algorithm
  261. policies = {}
  262. # Old Algorithm checkpoints: State must be completely retrieved from:
  263. # algo state file -> worker -> "state".
  264. if checkpoint_info["checkpoint_version"] < version.Version("1.0"):
  265. with open(checkpoint_info["state_file"], "rb") as f:
  266. state = pickle.load(f)
  267. # In older checkpoint versions, the policy states are stored under
  268. # "state" within the worker state (which is pickled in itself).
  269. worker_state = pickle.loads(state["worker"])
  270. policy_states = worker_state["state"]
  271. for pid, policy_state in policy_states.items():
  272. # Get spec and config, merge config with
  273. serialized_policy_spec = worker_state["policy_specs"][pid]
  274. policy_config = Algorithm.merge_algorithm_configs(
  275. worker_state["policy_config"], serialized_policy_spec["config"]
  276. )
  277. serialized_policy_spec.update({"config": policy_config})
  278. policy_state.update({"policy_spec": serialized_policy_spec})
  279. policies[pid] = Policy.from_state(policy_state)
  280. # Newer versions: Get policy states from "policies/" sub-dirs.
  281. elif checkpoint_info["policy_ids"] is not None:
  282. for policy_id in checkpoint_info["policy_ids"]:
  283. if policy_ids is None or policy_id in policy_ids:
  284. policy_checkpoint_info = get_checkpoint_info(
  285. os.path.join(
  286. checkpoint_info["checkpoint_dir"],
  287. "policies",
  288. policy_id,
  289. )
  290. )
  291. assert policy_checkpoint_info["type"] == "Policy"
  292. with open(policy_checkpoint_info["state_file"], "rb") as f:
  293. policy_state = pickle.load(f)
  294. policies[policy_id] = Policy.from_state(policy_state)
  295. return policies
  296. # Policy checkpoint: Return a single Policy instance.
  297. else:
  298. msgpack = None
  299. if checkpoint_info.get("format") == "msgpack":
  300. msgpack = try_import_msgpack(error=True)
  301. with open(checkpoint_info["state_file"], "rb") as f:
  302. if msgpack is not None:
  303. state = msgpack.load(f)
  304. else:
  305. state = pickle.load(f)
  306. return Policy.from_state(state)
  307. @staticmethod
  308. def from_state(state: PolicyState) -> "Policy":
  309. """Recovers a Policy from a state object.
  310. The `state` of an instantiated Policy can be retrieved by calling its
  311. `get_state` method. This only works for the V2 Policy classes (EagerTFPolicyV2,
  312. SynamicTFPolicyV2, and TorchPolicyV2). It contains all information necessary
  313. to create the Policy. No access to the original code (e.g. configs, knowledge of
  314. the policy's class, etc..) is needed.
  315. Args:
  316. state: The state to recover a new Policy instance from.
  317. Returns:
  318. A new Policy instance.
  319. """
  320. serialized_pol_spec: Optional[dict] = state.get("policy_spec")
  321. if serialized_pol_spec is None:
  322. raise ValueError(
  323. "No `policy_spec` key was found in given `state`! "
  324. "Cannot create new Policy."
  325. )
  326. pol_spec = PolicySpec.deserialize(serialized_pol_spec)
  327. actual_class = get_tf_eager_cls_if_necessary(
  328. pol_spec.policy_class,
  329. pol_spec.config,
  330. )
  331. if pol_spec.config["framework"] == "tf":
  332. from ray.rllib.policy.tf_policy import TFPolicy
  333. return TFPolicy._tf1_from_state_helper(state)
  334. # Create the new policy.
  335. new_policy = actual_class(
  336. # Note(jungong) : we are intentionally not using keyward arguments here
  337. # because some policies name the observation space parameter obs_space,
  338. # and some others name it observation_space.
  339. pol_spec.observation_space,
  340. pol_spec.action_space,
  341. pol_spec.config,
  342. )
  343. # Set the new policy's state (weights, optimizer vars, exploration state,
  344. # etc..).
  345. new_policy.set_state(state)
  346. # Return the new policy.
  347. return new_policy
  348. @ExperimentalAPI
  349. @OverrideToImplementCustomLogic
  350. def make_rl_module(self) -> "RLModule":
  351. """Returns the RL Module (only for when RLModule API is enabled.)
  352. If RLModule API is enabled (self.config.rl_module(_enable_rl_module_api=True),
  353. this method should be implemented and should return the RLModule instance to
  354. use for this Policy. Otherwise, RLlib will error out.
  355. """
  356. # if imported on top it creates circular dependency
  357. from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec
  358. if self.__policy_id is None:
  359. raise ValueError(
  360. "When using RLModule API, `policy_id` within the policies must be "
  361. "set. This should have happened automatically. If you see this "
  362. "bug, please file a github issue."
  363. )
  364. spec = self.config["__marl_module_spec"]
  365. if isinstance(spec, SingleAgentRLModuleSpec):
  366. module = spec.build()
  367. else:
  368. # filter the module_spec to only contain the policy_id of this policy
  369. marl_spec = type(spec)(
  370. marl_module_class=spec.marl_module_class,
  371. module_specs={self.__policy_id: spec.module_specs[self.__policy_id]},
  372. )
  373. marl_module = marl_spec.build()
  374. module = marl_module[self.__policy_id]
  375. return module
  376. @DeveloperAPI
  377. def init_view_requirements(self):
  378. """Maximal view requirements dict for `learn_on_batch()` and
  379. `compute_actions` calls.
  380. Specific policies can override this function to provide custom
  381. list of view requirements.
  382. """
  383. # Maximal view requirements dict for `learn_on_batch()` and
  384. # `compute_actions` calls.
  385. # View requirements will be automatically filtered out later based
  386. # on the postprocessing and loss functions to ensure optimal data
  387. # collection and transfer performance.
  388. view_reqs = self._get_default_view_requirements()
  389. if not hasattr(self, "view_requirements"):
  390. self.view_requirements = view_reqs
  391. else:
  392. for k, v in view_reqs.items():
  393. if k not in self.view_requirements:
  394. self.view_requirements[k] = v
  395. def get_connector_metrics(self) -> Dict:
  396. """Get metrics on timing from connectors."""
  397. return {
  398. "agent_connectors": {
  399. name + "_ms": 1000 * timer.mean
  400. for name, timer in self.agent_connectors.timers.items()
  401. },
  402. "action_connectors": {
  403. name + "_ms": 1000 * timer.mean
  404. for name, timer in self.agent_connectors.timers.items()
  405. },
  406. }
  407. def reset_connectors(self, env_id) -> None:
  408. """Reset action- and agent-connectors for this policy."""
  409. self.agent_connectors.reset(env_id=env_id)
  410. self.action_connectors.reset(env_id=env_id)
  411. @DeveloperAPI
  412. def compute_single_action(
  413. self,
  414. obs: Optional[TensorStructType] = None,
  415. state: Optional[List[TensorType]] = None,
  416. *,
  417. prev_action: Optional[TensorStructType] = None,
  418. prev_reward: Optional[TensorStructType] = None,
  419. info: dict = None,
  420. input_dict: Optional[SampleBatch] = None,
  421. episode: Optional["Episode"] = None,
  422. explore: Optional[bool] = None,
  423. timestep: Optional[int] = None,
  424. # Kwars placeholder for future compatibility.
  425. **kwargs,
  426. ) -> Tuple[TensorStructType, List[TensorType], Dict[str, TensorType]]:
  427. """Computes and returns a single (B=1) action value.
  428. Takes an input dict (usually a SampleBatch) as its main data input.
  429. This allows for using this method in case a more complex input pattern
  430. (view requirements) is needed, for example when the Model requires the
  431. last n observations, the last m actions/rewards, or a combination
  432. of any of these.
  433. Alternatively, in case no complex inputs are required, takes a single
  434. `obs` values (and possibly single state values, prev-action/reward
  435. values, etc..).
  436. Args:
  437. obs: Single observation.
  438. state: List of RNN state inputs, if any.
  439. prev_action: Previous action value, if any.
  440. prev_reward: Previous reward, if any.
  441. info: Info object, if any.
  442. input_dict: A SampleBatch or input dict containing the
  443. single (unbatched) Tensors to compute actions. If given, it'll
  444. be used instead of `obs`, `state`, `prev_action|reward`, and
  445. `info`.
  446. episode: This provides access to all of the internal episode state,
  447. which may be useful for model-based or multi-agent algorithms.
  448. explore: Whether to pick an exploitation or
  449. exploration action
  450. (default: None -> use self.config["explore"]).
  451. timestep: The current (sampling) time step.
  452. Keyword Args:
  453. kwargs: Forward compatibility placeholder.
  454. Returns:
  455. Tuple consisting of the action, the list of RNN state outputs (if
  456. any), and a dictionary of extra features (if any).
  457. """
  458. # Build the input-dict used for the call to
  459. # `self.compute_actions_from_input_dict()`.
  460. if input_dict is None:
  461. input_dict = {SampleBatch.OBS: obs}
  462. if state is not None:
  463. if self.config.get("_enable_rl_module_api", False):
  464. input_dict["state_in"] = state
  465. else:
  466. for i, s in enumerate(state):
  467. input_dict[f"state_in_{i}"] = s
  468. if prev_action is not None:
  469. input_dict[SampleBatch.PREV_ACTIONS] = prev_action
  470. if prev_reward is not None:
  471. input_dict[SampleBatch.PREV_REWARDS] = prev_reward
  472. if info is not None:
  473. input_dict[SampleBatch.INFOS] = info
  474. # Batch all data in input dict.
  475. input_dict = tree.map_structure_with_path(
  476. lambda p, s: (
  477. s
  478. if p == "seq_lens"
  479. else s.unsqueeze(0)
  480. if torch and isinstance(s, torch.Tensor)
  481. else np.expand_dims(s, 0)
  482. ),
  483. input_dict,
  484. )
  485. episodes = None
  486. if episode is not None:
  487. episodes = [episode]
  488. out = self.compute_actions_from_input_dict(
  489. input_dict=SampleBatch(input_dict),
  490. episodes=episodes,
  491. explore=explore,
  492. timestep=timestep,
  493. )
  494. # Some policies don't return a tuple, but always just a single action.
  495. # E.g. ES and ARS.
  496. if not isinstance(out, tuple):
  497. single_action = out
  498. state_out = []
  499. info = {}
  500. # Normal case: Policy should return (action, state, info) tuple.
  501. else:
  502. batched_action, state_out, info = out
  503. single_action = unbatch(batched_action)
  504. assert len(single_action) == 1
  505. single_action = single_action[0]
  506. # Return action, internal state(s), infos.
  507. return (
  508. single_action,
  509. tree.map_structure(lambda x: x[0], state_out),
  510. tree.map_structure(lambda x: x[0], info),
  511. )
  512. @DeveloperAPI
  513. def compute_actions_from_input_dict(
  514. self,
  515. input_dict: Union[SampleBatch, Dict[str, TensorStructType]],
  516. explore: Optional[bool] = None,
  517. timestep: Optional[int] = None,
  518. episodes: Optional[List["Episode"]] = None,
  519. **kwargs,
  520. ) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
  521. """Computes actions from collected samples (across multiple-agents).
  522. Takes an input dict (usually a SampleBatch) as its main data input.
  523. This allows for using this method in case a more complex input pattern
  524. (view requirements) is needed, for example when the Model requires the
  525. last n observations, the last m actions/rewards, or a combination
  526. of any of these.
  527. Args:
  528. input_dict: A SampleBatch or input dict containing the Tensors
  529. to compute actions. `input_dict` already abides to the
  530. Policy's as well as the Model's view requirements and can
  531. thus be passed to the Model as-is.
  532. explore: Whether to pick an exploitation or exploration
  533. action (default: None -> use self.config["explore"]).
  534. timestep: The current (sampling) time step.
  535. episodes: This provides access to all of the internal episodes'
  536. state, which may be useful for model-based or multi-agent
  537. algorithms.
  538. Keyword Args:
  539. kwargs: Forward compatibility placeholder.
  540. Returns:
  541. actions: Batch of output actions, with shape like
  542. [BATCH_SIZE, ACTION_SHAPE].
  543. state_outs: List of RNN state output
  544. batches, if any, each with shape [BATCH_SIZE, STATE_SIZE].
  545. info: Dictionary of extra feature batches, if any, with shape like
  546. {"f1": [BATCH_SIZE, ...], "f2": [BATCH_SIZE, ...]}.
  547. """
  548. # Default implementation just passes obs, prev-a/r, and states on to
  549. # `self.compute_actions()`.
  550. state_batches = [s for k, s in input_dict.items() if k.startswith("state_in")]
  551. return self.compute_actions(
  552. input_dict[SampleBatch.OBS],
  553. state_batches,
  554. prev_action_batch=input_dict.get(SampleBatch.PREV_ACTIONS),
  555. prev_reward_batch=input_dict.get(SampleBatch.PREV_REWARDS),
  556. info_batch=input_dict.get(SampleBatch.INFOS),
  557. explore=explore,
  558. timestep=timestep,
  559. episodes=episodes,
  560. **kwargs,
  561. )
  562. @abstractmethod
  563. @DeveloperAPI
  564. def compute_actions(
  565. self,
  566. obs_batch: Union[List[TensorStructType], TensorStructType],
  567. state_batches: Optional[List[TensorType]] = None,
  568. prev_action_batch: Union[List[TensorStructType], TensorStructType] = None,
  569. prev_reward_batch: Union[List[TensorStructType], TensorStructType] = None,
  570. info_batch: Optional[Dict[str, list]] = None,
  571. episodes: Optional[List["Episode"]] = None,
  572. explore: Optional[bool] = None,
  573. timestep: Optional[int] = None,
  574. **kwargs,
  575. ) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
  576. """Computes actions for the current policy.
  577. Args:
  578. obs_batch: Batch of observations.
  579. state_batches: List of RNN state input batches, if any.
  580. prev_action_batch: Batch of previous action values.
  581. prev_reward_batch: Batch of previous rewards.
  582. info_batch: Batch of info objects.
  583. episodes: List of Episode objects, one for each obs in
  584. obs_batch. This provides access to all of the internal
  585. episode state, which may be useful for model-based or
  586. multi-agent algorithms.
  587. explore: Whether to pick an exploitation or exploration action.
  588. Set to None (default) for using the value of
  589. `self.config["explore"]`.
  590. timestep: The current (sampling) time step.
  591. Keyword Args:
  592. kwargs: Forward compatibility placeholder
  593. Returns:
  594. actions: Batch of output actions, with shape like
  595. [BATCH_SIZE, ACTION_SHAPE].
  596. state_outs (List[TensorType]): List of RNN state output
  597. batches, if any, each with shape [BATCH_SIZE, STATE_SIZE].
  598. info (List[dict]): Dictionary of extra feature batches, if any,
  599. with shape like
  600. {"f1": [BATCH_SIZE, ...], "f2": [BATCH_SIZE, ...]}.
  601. """
  602. raise NotImplementedError
  603. @DeveloperAPI
  604. def compute_log_likelihoods(
  605. self,
  606. actions: Union[List[TensorType], TensorType],
  607. obs_batch: Union[List[TensorType], TensorType],
  608. state_batches: Optional[List[TensorType]] = None,
  609. prev_action_batch: Optional[Union[List[TensorType], TensorType]] = None,
  610. prev_reward_batch: Optional[Union[List[TensorType], TensorType]] = None,
  611. actions_normalized: bool = True,
  612. in_training: bool = True,
  613. ) -> TensorType:
  614. """Computes the log-prob/likelihood for a given action and observation.
  615. The log-likelihood is calculated using this Policy's action
  616. distribution class (self.dist_class).
  617. Args:
  618. actions: Batch of actions, for which to retrieve the
  619. log-probs/likelihoods (given all other inputs: obs,
  620. states, ..).
  621. obs_batch: Batch of observations.
  622. state_batches: List of RNN state input batches, if any.
  623. prev_action_batch: Batch of previous action values.
  624. prev_reward_batch: Batch of previous rewards.
  625. actions_normalized: Is the given `actions` already normalized
  626. (between -1.0 and 1.0) or not? If not and
  627. `normalize_actions=True`, we need to normalize the given
  628. actions first, before calculating log likelihoods.
  629. in_training: Whether to use the forward_train() or forward_exploration() of
  630. the underlying RLModule.
  631. Returns:
  632. Batch of log probs/likelihoods, with shape: [BATCH_SIZE].
  633. """
  634. raise NotImplementedError
  635. @DeveloperAPI
  636. @OverrideToImplementCustomLogic_CallToSuperRecommended
  637. def postprocess_trajectory(
  638. self,
  639. sample_batch: SampleBatch,
  640. other_agent_batches: Optional[
  641. Dict[AgentID, Tuple["Policy", SampleBatch]]
  642. ] = None,
  643. episode: Optional["Episode"] = None,
  644. ) -> SampleBatch:
  645. """Implements algorithm-specific trajectory postprocessing.
  646. This will be called on each trajectory fragment computed during policy
  647. evaluation. Each fragment is guaranteed to be only from one episode.
  648. The given fragment may or may not contain the end of this episode,
  649. depending on the `batch_mode=truncate_episodes|complete_episodes`,
  650. `rollout_fragment_length`, and other settings.
  651. Args:
  652. sample_batch: batch of experiences for the policy,
  653. which will contain at most one episode trajectory.
  654. other_agent_batches: In a multi-agent env, this contains a
  655. mapping of agent ids to (policy, agent_batch) tuples
  656. containing the policy and experiences of the other agents.
  657. episode: An optional multi-agent episode object to provide
  658. access to all of the internal episode state, which may
  659. be useful for model-based or multi-agent algorithms.
  660. Returns:
  661. The postprocessed sample batch.
  662. """
  663. # The default implementation just returns the same, unaltered batch.
  664. return sample_batch
  665. @ExperimentalAPI
  666. @OverrideToImplementCustomLogic
  667. def loss(
  668. self, model: ModelV2, dist_class: ActionDistribution, train_batch: SampleBatch
  669. ) -> Union[TensorType, List[TensorType]]:
  670. """Loss function for this Policy.
  671. Override this method in order to implement custom loss computations.
  672. Args:
  673. model: The model to calculate the loss(es).
  674. dist_class: The action distribution class to sample actions
  675. from the model's outputs.
  676. train_batch: The input batch on which to calculate the loss.
  677. Returns:
  678. Either a single loss tensor or a list of loss tensors.
  679. """
  680. raise NotImplementedError
  681. @DeveloperAPI
  682. def learn_on_batch(self, samples: SampleBatch) -> Dict[str, TensorType]:
  683. """Perform one learning update, given `samples`.
  684. Either this method or the combination of `compute_gradients` and
  685. `apply_gradients` must be implemented by subclasses.
  686. Args:
  687. samples: The SampleBatch object to learn from.
  688. Returns:
  689. Dictionary of extra metadata from `compute_gradients()`.
  690. Examples:
  691. >>> policy, sample_batch = ... # doctest: +SKIP
  692. >>> policy.learn_on_batch(sample_batch) # doctest: +SKIP
  693. """
  694. # The default implementation is simply a fused `compute_gradients` plus
  695. # `apply_gradients` call.
  696. grads, grad_info = self.compute_gradients(samples)
  697. self.apply_gradients(grads)
  698. return grad_info
  699. @ExperimentalAPI
  700. def learn_on_batch_from_replay_buffer(
  701. self, replay_actor: ActorHandle, policy_id: PolicyID
  702. ) -> Dict[str, TensorType]:
  703. """Samples a batch from given replay actor and performs an update.
  704. Args:
  705. replay_actor: The replay buffer actor to sample from.
  706. policy_id: The ID of this policy.
  707. Returns:
  708. Dictionary of extra metadata from `compute_gradients()`.
  709. """
  710. # Sample a batch from the given replay actor.
  711. # Note that for better performance (less data sent through the
  712. # network), this policy should be co-located on the same node
  713. # as `replay_actor`. Such a co-location step is usually done during
  714. # the Algorithm's `setup()` phase.
  715. batch = ray.get(replay_actor.replay.remote(policy_id=policy_id))
  716. if batch is None:
  717. return {}
  718. # Send to own learn_on_batch method for updating.
  719. # TODO: hack w/ `hasattr`
  720. if hasattr(self, "devices") and len(self.devices) > 1:
  721. self.load_batch_into_buffer(batch, buffer_index=0)
  722. return self.learn_on_loaded_batch(offset=0, buffer_index=0)
  723. else:
  724. return self.learn_on_batch(batch)
  725. @DeveloperAPI
  726. def load_batch_into_buffer(self, batch: SampleBatch, buffer_index: int = 0) -> int:
  727. """Bulk-loads the given SampleBatch into the devices' memories.
  728. The data is split equally across all the Policy's devices.
  729. If the data is not evenly divisible by the batch size, excess data
  730. should be discarded.
  731. Args:
  732. batch: The SampleBatch to load.
  733. buffer_index: The index of the buffer (a MultiGPUTowerStack) to use
  734. on the devices. The number of buffers on each device depends
  735. on the value of the `num_multi_gpu_tower_stacks` config key.
  736. Returns:
  737. The number of tuples loaded per device.
  738. """
  739. raise NotImplementedError
  740. @DeveloperAPI
  741. def get_num_samples_loaded_into_buffer(self, buffer_index: int = 0) -> int:
  742. """Returns the number of currently loaded samples in the given buffer.
  743. Args:
  744. buffer_index: The index of the buffer (a MultiGPUTowerStack)
  745. to use on the devices. The number of buffers on each device
  746. depends on the value of the `num_multi_gpu_tower_stacks` config
  747. key.
  748. Returns:
  749. The number of tuples loaded per device.
  750. """
  751. raise NotImplementedError
  752. @DeveloperAPI
  753. def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0):
  754. """Runs a single step of SGD on an already loaded data in a buffer.
  755. Runs an SGD step over a slice of the pre-loaded batch, offset by
  756. the `offset` argument (useful for performing n minibatch SGD
  757. updates repeatedly on the same, already pre-loaded data).
  758. Updates the model weights based on the averaged per-device gradients.
  759. Args:
  760. offset: Offset into the preloaded data. Used for pre-loading
  761. a train-batch once to a device, then iterating over
  762. (subsampling through) this batch n times doing minibatch SGD.
  763. buffer_index: The index of the buffer (a MultiGPUTowerStack)
  764. to take the already pre-loaded data from. The number of buffers
  765. on each device depends on the value of the
  766. `num_multi_gpu_tower_stacks` config key.
  767. Returns:
  768. The outputs of extra_ops evaluated over the batch.
  769. """
  770. raise NotImplementedError
  771. @DeveloperAPI
  772. def compute_gradients(
  773. self, postprocessed_batch: SampleBatch
  774. ) -> Tuple[ModelGradients, Dict[str, TensorType]]:
  775. """Computes gradients given a batch of experiences.
  776. Either this in combination with `apply_gradients()` or
  777. `learn_on_batch()` must be implemented by subclasses.
  778. Args:
  779. postprocessed_batch: The SampleBatch object to use
  780. for calculating gradients.
  781. Returns:
  782. grads: List of gradient output values.
  783. grad_info: Extra policy-specific info values.
  784. """
  785. raise NotImplementedError
  786. @DeveloperAPI
  787. def apply_gradients(self, gradients: ModelGradients) -> None:
  788. """Applies the (previously) computed gradients.
  789. Either this in combination with `compute_gradients()` or
  790. `learn_on_batch()` must be implemented by subclasses.
  791. Args:
  792. gradients: The already calculated gradients to apply to this
  793. Policy.
  794. """
  795. raise NotImplementedError
  796. @DeveloperAPI
  797. def get_weights(self) -> ModelWeights:
  798. """Returns model weights.
  799. Note: The return value of this method will reside under the "weights"
  800. key in the return value of Policy.get_state(). Model weights are only
  801. one part of a Policy's state. Other state information contains:
  802. optimizer variables, exploration state, and global state vars such as
  803. the sampling timestep.
  804. Returns:
  805. Serializable copy or view of model weights.
  806. """
  807. raise NotImplementedError
  808. @DeveloperAPI
  809. def set_weights(self, weights: ModelWeights) -> None:
  810. """Sets this Policy's model's weights.
  811. Note: Model weights are only one part of a Policy's state. Other
  812. state information contains: optimizer variables, exploration state,
  813. and global state vars such as the sampling timestep.
  814. Args:
  815. weights: Serializable copy or view of model weights.
  816. """
  817. raise NotImplementedError
  818. @DeveloperAPI
  819. def get_exploration_state(self) -> Dict[str, TensorType]:
  820. """Returns the state of this Policy's exploration component.
  821. Returns:
  822. Serializable information on the `self.exploration` object.
  823. """
  824. return self.exploration.get_state()
  825. @DeveloperAPI
  826. def is_recurrent(self) -> bool:
  827. """Whether this Policy holds a recurrent Model.
  828. Returns:
  829. True if this Policy has-a RNN-based Model.
  830. """
  831. return False
  832. @DeveloperAPI
  833. def num_state_tensors(self) -> int:
  834. """The number of internal states needed by the RNN-Model of the Policy.
  835. Returns:
  836. int: The number of RNN internal states kept by this Policy's Model.
  837. """
  838. return 0
  839. @DeveloperAPI
  840. def get_initial_state(self) -> List[TensorType]:
  841. """Returns initial RNN state for the current policy.
  842. Returns:
  843. List[TensorType]: Initial RNN state for the current policy.
  844. """
  845. return []
  846. @DeveloperAPI
  847. @OverrideToImplementCustomLogic_CallToSuperRecommended
  848. def get_state(self) -> PolicyState:
  849. """Returns the entire current state of this Policy.
  850. Note: Not to be confused with an RNN model's internal state.
  851. State includes the Model(s)' weights, optimizer weights,
  852. the exploration component's state, as well as global variables, such
  853. as sampling timesteps.
  854. Note that the state may contain references to the original variables.
  855. This means that you may need to deepcopy() the state before mutating it.
  856. Returns:
  857. Serialized local state.
  858. """
  859. state = {
  860. # All the policy's weights.
  861. "weights": self.get_weights(),
  862. # The current global timestep.
  863. "global_timestep": self.global_timestep,
  864. # The current num_grad_updates counter.
  865. "num_grad_updates": self.num_grad_updates,
  866. }
  867. # Add this Policy's spec so it can be retreived w/o access to the original
  868. # code.
  869. policy_spec = PolicySpec(
  870. policy_class=type(self),
  871. observation_space=self.observation_space,
  872. action_space=self.action_space,
  873. config=self.config,
  874. )
  875. state["policy_spec"] = policy_spec.serialize()
  876. if self.config.get("enable_connectors", False):
  877. # Checkpoint connectors state as well if enabled.
  878. connector_configs = {}
  879. if self.agent_connectors:
  880. connector_configs["agent"] = self.agent_connectors.to_state()
  881. if self.action_connectors:
  882. connector_configs["action"] = self.action_connectors.to_state()
  883. state["connector_configs"] = connector_configs
  884. return state
  885. @PublicAPI(stability="alpha")
  886. def restore_connectors(self, state: PolicyState):
  887. """Restore agent and action connectors if configs available.
  888. Args:
  889. state: The new state to set this policy to. Can be
  890. obtained by calling `self.get_state()`.
  891. """
  892. # To avoid a circular dependency problem cause by SampleBatch.
  893. from ray.rllib.connectors.util import restore_connectors_for_policy
  894. # No-op if connector is not enabled.
  895. if not self.config.get("enable_connectors", False):
  896. return
  897. connector_configs = state.get("connector_configs", {})
  898. if "agent" in connector_configs:
  899. self.agent_connectors = restore_connectors_for_policy(
  900. self, connector_configs["agent"]
  901. )
  902. logger.debug("restoring agent connectors:")
  903. logger.debug(self.agent_connectors.__str__(indentation=4))
  904. if "action" in connector_configs:
  905. self.action_connectors = restore_connectors_for_policy(
  906. self, connector_configs["action"]
  907. )
  908. logger.debug("restoring action connectors:")
  909. logger.debug(self.action_connectors.__str__(indentation=4))
  910. @DeveloperAPI
  911. @OverrideToImplementCustomLogic_CallToSuperRecommended
  912. def set_state(self, state: PolicyState) -> None:
  913. """Restores the entire current state of this Policy from `state`.
  914. Args:
  915. state: The new state to set this policy to. Can be
  916. obtained by calling `self.get_state()`.
  917. """
  918. if "policy_spec" in state:
  919. policy_spec = PolicySpec.deserialize(state["policy_spec"])
  920. # Assert spaces remained the same.
  921. if (
  922. policy_spec.observation_space is not None
  923. and policy_spec.observation_space != self.observation_space
  924. ):
  925. logger.warning(
  926. "`observation_space` in given policy state ("
  927. f"{policy_spec.observation_space}) does not match this Policy's "
  928. f"observation space ({self.observation_space})."
  929. )
  930. if (
  931. policy_spec.action_space is not None
  932. and policy_spec.action_space != self.action_space
  933. ):
  934. logger.warning(
  935. "`action_space` in given policy state ("
  936. f"{policy_spec.action_space}) does not match this Policy's "
  937. f"action space ({self.action_space})."
  938. )
  939. # Override config, if part of the spec.
  940. if policy_spec.config:
  941. self.config = policy_spec.config
  942. # Override NN weights.
  943. self.set_weights(state["weights"])
  944. self.restore_connectors(state)
  945. @ExperimentalAPI
  946. def apply(
  947. self,
  948. func: Callable[["Policy", Optional[Any], Optional[Any]], T],
  949. *args,
  950. **kwargs,
  951. ) -> T:
  952. """Calls the given function with this Policy instance.
  953. Useful for when the Policy class has been converted into a ActorHandle
  954. and the user needs to execute some functionality (e.g. add a property)
  955. on the underlying policy object.
  956. Args:
  957. func: The function to call, with this Policy as first
  958. argument, followed by args, and kwargs.
  959. args: Optional additional args to pass to the function call.
  960. kwargs: Optional additional kwargs to pass to the function call.
  961. Returns:
  962. The return value of the function call.
  963. """
  964. return func(self, *args, **kwargs)
  965. @DeveloperAPI
  966. def on_global_var_update(self, global_vars: Dict[str, TensorType]) -> None:
  967. """Called on an update to global vars.
  968. Args:
  969. global_vars: Global variables by str key, broadcast from the
  970. driver.
  971. """
  972. # Store the current global time step (sum over all policies' sample
  973. # steps).
  974. # Make sure, we keep global_timestep as a Tensor for tf-eager
  975. # (leads to memory leaks if not doing so).
  976. if self.framework == "tf2":
  977. self.global_timestep.assign(global_vars["timestep"])
  978. else:
  979. self.global_timestep = global_vars["timestep"]
  980. # Update our lifetime gradient update counter.
  981. num_grad_updates = global_vars.get("num_grad_updates")
  982. if num_grad_updates is not None:
  983. self.num_grad_updates = num_grad_updates
  984. @DeveloperAPI
  985. def export_checkpoint(
  986. self,
  987. export_dir: str,
  988. filename_prefix=DEPRECATED_VALUE,
  989. *,
  990. policy_state: Optional[PolicyState] = None,
  991. checkpoint_format: str = "cloudpickle",
  992. ) -> None:
  993. """Exports Policy checkpoint to a local directory and returns an AIR Checkpoint.
  994. Args:
  995. export_dir: Local writable directory to store the AIR Checkpoint
  996. information into.
  997. policy_state: An optional PolicyState to write to disk. Used by
  998. `Algorithm.save_checkpoint()` to save on the additional
  999. `self.get_state()` calls of its different Policies.
  1000. checkpoint_format: Either one of 'cloudpickle' or 'msgpack'.
  1001. Example:
  1002. >>> from ray.rllib.algorithms.ppo import PPOTorchPolicy
  1003. >>> policy = PPOTorchPolicy(...) # doctest: +SKIP
  1004. >>> policy.export_checkpoint("/tmp/export_dir") # doctest: +SKIP
  1005. """
  1006. # `filename_prefix` should not longer be used as new Policy checkpoints
  1007. # contain more than one file with a fixed filename structure.
  1008. if filename_prefix != DEPRECATED_VALUE:
  1009. deprecation_warning(
  1010. old="Policy.export_checkpoint(filename_prefix=...)",
  1011. error=True,
  1012. )
  1013. if checkpoint_format not in ["cloudpickle", "msgpack"]:
  1014. raise ValueError(
  1015. f"Value of `checkpoint_format` ({checkpoint_format}) must either be "
  1016. "'cloudpickle' or 'msgpack'!"
  1017. )
  1018. if policy_state is None:
  1019. policy_state = self.get_state()
  1020. # Write main policy state file.
  1021. os.makedirs(export_dir, exist_ok=True)
  1022. if checkpoint_format == "cloudpickle":
  1023. policy_state["checkpoint_version"] = CHECKPOINT_VERSION
  1024. state_file = "policy_state.pkl"
  1025. with open(os.path.join(export_dir, state_file), "w+b") as f:
  1026. pickle.dump(policy_state, f)
  1027. else:
  1028. from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
  1029. msgpack = try_import_msgpack(error=True)
  1030. policy_state["checkpoint_version"] = str(CHECKPOINT_VERSION)
  1031. # Serialize the config for msgpack dump'ing.
  1032. policy_state["policy_spec"]["config"] = AlgorithmConfig._serialize_dict(
  1033. policy_state["policy_spec"]["config"]
  1034. )
  1035. state_file = "policy_state.msgpck"
  1036. with open(os.path.join(export_dir, state_file), "w+b") as f:
  1037. msgpack.dump(policy_state, f)
  1038. # Write RLlib checkpoint json.
  1039. with open(os.path.join(export_dir, "rllib_checkpoint.json"), "w") as f:
  1040. json.dump(
  1041. {
  1042. "type": "Policy",
  1043. "checkpoint_version": str(policy_state["checkpoint_version"]),
  1044. "format": checkpoint_format,
  1045. "state_file": state_file,
  1046. "ray_version": ray.__version__,
  1047. "ray_commit": ray.__commit__,
  1048. },
  1049. f,
  1050. )
  1051. # Add external model files, if required.
  1052. if self.config["export_native_model_files"]:
  1053. self.export_model(os.path.join(export_dir, "model"))
  1054. @DeveloperAPI
  1055. def export_model(self, export_dir: str, onnx: Optional[int] = None) -> None:
  1056. """Exports the Policy's Model to local directory for serving.
  1057. Note: The file format will depend on the deep learning framework used.
  1058. See the child classed of Policy and their `export_model`
  1059. implementations for more details.
  1060. Args:
  1061. export_dir: Local writable directory.
  1062. onnx: If given, will export model in ONNX format. The
  1063. value of this parameter set the ONNX OpSet version to use.
  1064. Raises:
  1065. ValueError: If a native DL-framework based model (e.g. a keras Model)
  1066. cannot be saved to disk for various reasons.
  1067. """
  1068. raise NotImplementedError
  1069. @DeveloperAPI
  1070. def import_model_from_h5(self, import_file: str) -> None:
  1071. """Imports Policy from local file.
  1072. Args:
  1073. import_file: Local readable file.
  1074. """
  1075. raise NotImplementedError
  1076. @DeveloperAPI
  1077. def get_session(self) -> Optional["tf1.Session"]:
  1078. """Returns tf.Session object to use for computing actions or None.
  1079. Note: This method only applies to TFPolicy sub-classes. All other
  1080. sub-classes should expect a None to be returned from this method.
  1081. Returns:
  1082. The tf Session to use for computing actions and losses with
  1083. this policy or None.
  1084. """
  1085. return None
  1086. def get_host(self) -> str:
  1087. """Returns the computer's network name.
  1088. Returns:
  1089. The computer's networks name or an empty string, if the network
  1090. name could not be determined.
  1091. """
  1092. return platform.node()
  1093. def _get_num_gpus_for_policy(self) -> int:
  1094. """Decide on the number of CPU/GPU nodes this policy should run on.
  1095. Return:
  1096. 0 if policy should run on CPU. >0 if policy should run on 1 or
  1097. more GPUs.
  1098. """
  1099. worker_idx = self.config.get("worker_index", 0)
  1100. fake_gpus = self.config.get("_fake_gpus", False)
  1101. if (
  1102. ray._private.worker._mode() == ray._private.worker.LOCAL_MODE
  1103. and not fake_gpus
  1104. ):
  1105. # If in local debugging mode, and _fake_gpus is not on.
  1106. num_gpus = 0
  1107. elif worker_idx == 0:
  1108. # If we are on the new RLModule/Learner stack, `num_gpus` is deprecated.
  1109. # so use `num_gpus_per_worker` for policy sampling
  1110. # we need this .get() syntax here to ensure backwards compatibility.
  1111. if self.config.get("_enable_learner_api", False):
  1112. num_gpus = self.config["num_gpus_per_worker"]
  1113. else:
  1114. # If head node, take num_gpus.
  1115. num_gpus = self.config["num_gpus"]
  1116. else:
  1117. # If worker node, take num_gpus_per_worker
  1118. num_gpus = self.config["num_gpus_per_worker"]
  1119. if num_gpus == 0:
  1120. dev = "CPU"
  1121. else:
  1122. dev = "{} {}".format(num_gpus, "fake-GPUs" if fake_gpus else "GPUs")
  1123. logger.info(
  1124. "Policy (worker={}) running on {}.".format(
  1125. worker_idx if worker_idx > 0 else "local", dev
  1126. )
  1127. )
  1128. return num_gpus
  1129. def _create_exploration(self) -> Exploration:
  1130. """Creates the Policy's Exploration object.
  1131. This method only exists b/c some Algorithms do not use TfPolicy nor
  1132. TorchPolicy, but inherit directly from Policy. Others inherit from
  1133. TfPolicy w/o using DynamicTFPolicy.
  1134. TODO(sven): unify these cases.
  1135. Returns:
  1136. Exploration: The Exploration object to be used by this Policy.
  1137. """
  1138. if getattr(self, "exploration", None) is not None:
  1139. return self.exploration
  1140. exploration = from_config(
  1141. Exploration,
  1142. self.config.get("exploration_config", {"type": "StochasticSampling"}),
  1143. action_space=self.action_space,
  1144. policy_config=self.config,
  1145. model=getattr(self, "model", None),
  1146. num_workers=self.config.get("num_workers", 0),
  1147. worker_index=self.config.get("worker_index", 0),
  1148. framework=getattr(self, "framework", self.config.get("framework", "tf")),
  1149. )
  1150. return exploration

    def _get_default_view_requirements(self):
        """Returns a default ViewRequirements dict.

        Note: This is the base/maximum requirement dict, from which later
        some requirements will be subtracted again automatically to streamline
        data collection, batch creation, and data transfer.

        Returns:
            ViewReqDict: The default view requirements dict.
        """
        # Default view requirements (equal to those that we would use before
        # the trajectory view API was introduced).
        return {
            SampleBatch.OBS: ViewRequirement(space=self.observation_space),
            SampleBatch.NEXT_OBS: ViewRequirement(
                data_col=SampleBatch.OBS,
                shift=1,
                space=self.observation_space,
                used_for_compute_actions=False,
            ),
            SampleBatch.ACTIONS: ViewRequirement(
                space=self.action_space, used_for_compute_actions=False
            ),
            # For backward compatibility with custom Models that don't specify
            # these explicitly (will be removed by Policy if not used).
            SampleBatch.PREV_ACTIONS: ViewRequirement(
                data_col=SampleBatch.ACTIONS, shift=-1, space=self.action_space
            ),
            SampleBatch.REWARDS: ViewRequirement(),
            # For backward compatibility with custom Models that don't specify
            # these explicitly (will be removed by Policy if not used).
            SampleBatch.PREV_REWARDS: ViewRequirement(
                data_col=SampleBatch.REWARDS, shift=-1
            ),
            SampleBatch.TERMINATEDS: ViewRequirement(),
            SampleBatch.TRUNCATEDS: ViewRequirement(),
            SampleBatch.INFOS: ViewRequirement(used_for_compute_actions=False),
            SampleBatch.EPS_ID: ViewRequirement(),
            SampleBatch.UNROLL_ID: ViewRequirement(),
            SampleBatch.AGENT_INDEX: ViewRequirement(),
            SampleBatch.T: ViewRequirement(),
        }
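
    # Illustration (not part of the original source) of the shift semantics in
    # the dict above: NEXT_OBS views the OBS column one step into the future
    # (shift=1), while PREV_ACTIONS/PREV_REWARDS view their underlying columns
    # one step into the past (shift=-1). E.g., for an episode
    #
    #     t:        0    1    2
    #     OBS:      o0   o1   o2
    #     ACTIONS:  a0   a1   a2
    #
    # the row at t=1 would expose OBS=o1, NEXT_OBS=o2, and PREV_ACTIONS=a0.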

    def _initialize_loss_from_dummy_batch(
        self,
        auto_remove_unneeded_view_reqs: bool = True,
        stats_fn=None,
    ) -> None:
        """Performs test calls through the policy's model and loss.

        NOTE: This base method should work for define-by-run Policies such as
        torch and tf-eager policies.

        If required, will thereby automatically detect which data views are
        required by a) the forward pass, b) the postprocessing, and c) the loss
        functions, and remove those from self.view_requirements that are not
        necessary for these computations (to save data storage and transfer).

        Args:
            auto_remove_unneeded_view_reqs: Whether to automatically
                remove those ViewRequirements records from
                self.view_requirements that are not needed.
            stats_fn (Optional[Callable[[Policy, SampleBatch], Dict[str,
                TensorType]]]): An optional stats function to be called after
                the loss.
        """
        if self.config.get("_disable_initialize_loss_from_dummy_batch", False):
            return

        # Signal Policy that currently we do not want to eager/jit trace
        # any function calls. This is to be able to track which columns
        # in the dummy batch are accessed by the different functions (e.g.
        # loss) such that we can then adjust our view requirements.
        self._no_tracing = True
        # Save for later so that loss init does not change the global timestep.
        global_ts_before_init = int(convert_to_numpy(self.global_timestep))

        sample_batch_size = min(
            max(self.batch_divisibility_req * 4, 32),
            self.config["train_batch_size"],  # Don't go over the asked batch size.
        )
        self._dummy_batch = self._get_dummy_batch_from_view_requirements(
            sample_batch_size
        )
        self._lazy_tensor_dict(self._dummy_batch)
        # With RL Modules you want the explore flag to be True for initialization
        # of the tensors and placeholders you'd need for training.
        explore = self.config.get("_enable_rl_module_api", False)
        actions, state_outs, extra_outs = self.compute_actions_from_input_dict(
            self._dummy_batch, explore=explore
        )
        if not self.config.get("_enable_rl_module_api", False):
            for key, view_req in self.view_requirements.items():
                if key not in self._dummy_batch.accessed_keys:
                    view_req.used_for_compute_actions = False
        # Add all extra action outputs to view requirements (these may be
        # filtered out later again, if not needed for postprocessing or loss).
        for key, value in extra_outs.items():
            self._dummy_batch[key] = value
            if key not in self.view_requirements:
                if isinstance(value, (dict, np.ndarray)):
                    # The assumption is that `value` is a nested dict with
                    # np.ndarray leaves.
                    space = get_gym_space_from_struct_of_tensors(value)
                    self.view_requirements[key] = ViewRequirement(
                        space=space, used_for_compute_actions=False
                    )
                else:
                    raise ValueError(
                        "policy.compute_actions_from_input_dict() returns an "
                        "extra action output that is neither a numpy array nor "
                        "a dict."
                    )

        for key in self._dummy_batch.accessed_keys:
            if key not in self.view_requirements:
                self.view_requirements[key] = ViewRequirement()
                self.view_requirements[key].used_for_compute_actions = False
        # TODO (kourosh): Why did we use to make used_for_compute_actions True here?
        new_batch = self._get_dummy_batch_from_view_requirements(sample_batch_size)
        # Make sure the dummy_batch will return numpy arrays when accessed.
        self._dummy_batch.set_get_interceptor(None)

        # Try to re-use the output of the previous run to avoid overriding things
        # that would break (e.g. the scale of a Normal distribution cannot be zero).
        for k in new_batch:
            if k not in self._dummy_batch:
                self._dummy_batch[k] = new_batch[k]

        # Make sure the book-keeping of dummy_batch keys is reset to correctly
        # track what is accessed, what is added, and what's deleted from now on.
        self._dummy_batch.accessed_keys.clear()
        self._dummy_batch.deleted_keys.clear()
        self._dummy_batch.added_keys.clear()

        if self.exploration:
            # Policies with RLModules don't have an exploration object.
            self.exploration.postprocess_trajectory(self, self._dummy_batch)

        postprocessed_batch = self.postprocess_trajectory(self._dummy_batch)
        seq_lens = None
        if state_outs:
            B = 4  # For RNNs, have B=4, T=[depends on sample_batch_size].
            if self.config.get("_enable_rl_module_api", False):
                sub_batch = postprocessed_batch[:B]
                postprocessed_batch["state_in"] = sub_batch["state_in"]
                postprocessed_batch["state_out"] = sub_batch["state_out"]
            else:
                i = 0
                while "state_in_{}".format(i) in postprocessed_batch:
                    postprocessed_batch["state_in_{}".format(i)] = postprocessed_batch[
                        "state_in_{}".format(i)
                    ][:B]
                    if "state_out_{}".format(i) in postprocessed_batch:
                        postprocessed_batch[
                            "state_out_{}".format(i)
                        ] = postprocessed_batch["state_out_{}".format(i)][:B]
                    i += 1
            seq_len = sample_batch_size // B
            seq_lens = np.array([seq_len for _ in range(B)], dtype=np.int32)
            postprocessed_batch[SampleBatch.SEQ_LENS] = seq_lens

        if not self.config.get("_enable_learner_api"):
            # Switch on lazy to-tensor conversion on `postprocessed_batch`.
            train_batch = self._lazy_tensor_dict(postprocessed_batch)
            # Calling loss, so set `is_training` to True.
            train_batch.set_training(True)
            if seq_lens is not None:
                train_batch[SampleBatch.SEQ_LENS] = seq_lens
            train_batch.count = self._dummy_batch.count
            # Call the loss function, if it exists.
            # TODO(jungong): Clean up after all agents get migrated.
            #  We should simply do self.loss(...) here.
            if self._loss is not None:
                self._loss(self, self.model, self.dist_class, train_batch)
            elif is_overridden(self.loss) and not self.config["in_evaluation"]:
                self.loss(self.model, self.dist_class, train_batch)
            # Call the stats fn, if given.
            # TODO(jungong): Clean up after all agents get migrated.
            #  We should simply do self.stats_fn(train_batch) here.
            if stats_fn is not None:
                stats_fn(self, train_batch)
            if hasattr(self, "stats_fn") and not self.config["in_evaluation"]:
                self.stats_fn(train_batch)
        else:
            # This is not needed to run a training with the Learner API, but useful
            # if we want to create a batch of data for training from view
            # requirements.
            for key in set(postprocessed_batch.keys()).difference(
                set(new_batch.keys())
            ):
                # Add all columns generated by postprocessing to view requirements.
                if key not in self.view_requirements and key != SampleBatch.SEQ_LENS:
                    self.view_requirements[key] = ViewRequirement(
                        used_for_compute_actions=False
                    )

        # Re-enable tracing.
        self._no_tracing = False

        # Add new columns automatically to view-reqs.
        if (
            not self.config.get("_enable_learner_api")
            and auto_remove_unneeded_view_reqs
        ):
            # Add those needed for postprocessing and training.
            all_accessed_keys = (
                train_batch.accessed_keys
                | self._dummy_batch.accessed_keys
                | self._dummy_batch.added_keys
            )
            for key in all_accessed_keys:
                if key not in self.view_requirements and key != SampleBatch.SEQ_LENS:
                    self.view_requirements[key] = ViewRequirement(
                        used_for_compute_actions=False
                    )
            if self._loss or is_overridden(self.loss):
                # Tag those only needed for post-processing (with some
                # exceptions).
                for key in self._dummy_batch.accessed_keys:
                    if (
                        key not in train_batch.accessed_keys
                        and key in self.view_requirements
                        and key not in self.model.view_requirements
                        and key
                        not in [
                            SampleBatch.EPS_ID,
                            SampleBatch.AGENT_INDEX,
                            SampleBatch.UNROLL_ID,
                            SampleBatch.TERMINATEDS,
                            SampleBatch.TRUNCATEDS,
                            SampleBatch.REWARDS,
                            SampleBatch.INFOS,
                            SampleBatch.T,
                        ]
                    ):
                        self.view_requirements[key].used_for_training = False
                # Remove those not needed at all (leave those that are needed
                # by the Sampler to properly execute sample collection). Also always
                # leave TERMINATEDS, TRUNCATEDS, REWARDS, INFOS, no matter what.
                for key in list(self.view_requirements.keys()):
                    if (
                        key not in all_accessed_keys
                        and key
                        not in [
                            SampleBatch.EPS_ID,
                            SampleBatch.AGENT_INDEX,
                            SampleBatch.UNROLL_ID,
                            SampleBatch.TERMINATEDS,
                            SampleBatch.TRUNCATEDS,
                            SampleBatch.REWARDS,
                            SampleBatch.INFOS,
                            SampleBatch.T,
                        ]
                        and key not in self.model.view_requirements
                    ):
                        # If the user deleted this key manually in the postprocessing
                        # fn, warn about it and do not remove it from the
                        # view-requirements.
                        if key in self._dummy_batch.deleted_keys:
                            logger.warning(
                                "SampleBatch key '{}' was deleted manually in "
                                "postprocessing function! RLlib will "
                                "automatically remove non-used items from the "
                                "data stream. Remove the `del` from your "
                                "postprocessing function.".format(key)
                            )
                        # If we are not writing output to disk, it is safe to erase
                        # this key to save space in the sample batch.
                        elif self.config["output"] is None:
                            del self.view_requirements[key]

        if type(self.global_timestep) is int:
            self.global_timestep = global_ts_before_init
        elif isinstance(self.global_timestep, tf.Variable):
            self.global_timestep.assign(global_ts_before_init)
        else:
            raise ValueError(
                "Variable self.global_timestep of policy {} needs to be "
                "either of type `int` or `tf.Variable`, "
                "but is of type {}.".format(self, type(self.global_timestep))
            )
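
    # High-level sketch (not part of the original source) of what the method
    # above does during Policy setup:
    #
    #     1. Build a small all-zeros dummy batch from self.view_requirements.
    #     2. Run compute_actions_from_input_dict(), postprocess_trajectory(),
    #        and (pre-Learner-API) the loss/stats functions on it, while the
    #        SampleBatch records which keys each step accessed/added/deleted.
    #     3. Use that record to add missing ViewRequirements and to drop or
    #        down-grade (used_for_training=False) the ones nothing touched.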

    @ExperimentalAPI
    def maybe_add_time_dimension(
        self,
        input_dict: Dict[str, TensorType],
        seq_lens: TensorType,
        framework: str = None,
    ):
        """Adds a time dimension for recurrent RLModules.

        Args:
            input_dict: The input dict.
            seq_lens: The sequence lengths.
            framework: The framework to use for adding the time dimensions.
                If None, will default to the framework of the policy.

        Returns:
            The input dict, with a possibly added time dimension.
        """
        # We need to check for hasattr(self, "model") because a dummy Policy may
        # not have a model.
        if (
            self.config.get("_enable_rl_module_api", False)
            and hasattr(self, "model")
            and self.model.is_stateful()
        ):
            # Note that this is a temporary workaround to fit the old sampling
            # stack to RL Modules.
            ret = {}

            framework = framework or self.model.framework

            def _add_time_dimension(inputs):
                inputs = add_time_dimension(
                    inputs,
                    seq_lens=seq_lens,
                    framework=framework,
                    time_major=self.config.get("model", {}).get("_time_major", False),
                )
                return inputs

            def _add_state_out_time_dimension(inputs):
                # We do a hack here in that we add a time dimension,
                # even though the tensor already has one. Then, we remove the
                # original time dimension.
                v_w_two_time_dims = _add_time_dimension(inputs)
                if framework == "tf2":
                    return tf.squeeze(v_w_two_time_dims, axis=2)
                elif framework == "torch":
                    # Remove the second time dimension.
                    return torch.squeeze(v_w_two_time_dims, axis=2)
                elif framework == "np":
                    shape = v_w_two_time_dims.shape
                    padded_batch_dim = shape[0]
                    padded_time_dim = shape[1]
                    other_dims = shape[3:]
                    new_shape = (padded_batch_dim, padded_time_dim) + other_dims
                    return v_w_two_time_dims.reshape(new_shape)
                else:
                    raise ValueError(f"Framework {framework} not implemented!")

            for k, v in input_dict.items():
                if k == SampleBatch.INFOS:
                    ret[k] = _add_time_dimension(v)
                elif k == SampleBatch.SEQ_LENS:
                    # Sequence lengths have no time dimension.
                    ret[k] = v
                elif k == STATE_IN:
                    # Assume that batch_repeat_value is max seq len.
                    # This is commonly the case for STATE_IN.
                    # Values should already have correct batch and time dimensions.
                    assert self.view_requirements[k].batch_repeat_value != 1
                    ret[k] = v
                elif k == STATE_OUT:
                    # Assume that batch_repeat_value is 1.
                    # This is commonly the case for STATE_OUT.
                    assert self.view_requirements[k].batch_repeat_value == 1
                    ret[k] = tree.map_structure(_add_state_out_time_dimension, v)
                else:
                    ret[k] = tree.map_structure(_add_time_dimension, v)
            return SampleBatch(ret)
        else:
            return input_dict
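
    # Shape sketch (not part of the original source): with batch-major layout,
    # add_time_dimension() folds a flat batch of padded rows into [B, T, ...],
    # e.g. a [12, obs_dim] tensor with seq_lens = [4, 4, 4] becomes
    # [3, 4, obs_dim]. STATE_OUT values, which already carry a time axis, get
    # the duplicate axis squeezed out again by _add_state_out_time_dimension().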

    @ExperimentalAPI
    def maybe_remove_time_dimension(self, input_dict: Dict[str, TensorType]):
        """Removes a time dimension for recurrent RLModules.

        Args:
            input_dict: The input dict.

        Returns:
            The input dict with a possibly removed time dimension.
        """
        raise NotImplementedError

    def _get_dummy_batch_from_view_requirements(
        self, batch_size: int = 1
    ) -> SampleBatch:
        """Creates a numpy dummy batch based on the Policy's view requirements.

        Args:
            batch_size: The size of the batch to create.

        Returns:
            Dict[str, TensorType]: The dummy batch containing all zero values.
        """
        ret = {}
        for view_col, view_req in self.view_requirements.items():
            data_col = view_req.data_col or view_col
            # Flattened dummy batch.
            if (isinstance(view_req.space, (gym.spaces.Tuple, gym.spaces.Dict))) and (
                (
                    data_col == SampleBatch.OBS
                    and not self.config["_disable_preprocessor_api"]
                )
                or (
                    data_col == SampleBatch.ACTIONS
                    and not self.config.get("_disable_action_flattening")
                )
            ):
                _, shape = ModelCatalog.get_action_shape(
                    view_req.space, framework=self.config["framework"]
                )
                ret[view_col] = np.zeros((batch_size,) + shape[1:], np.float32)
            # Non-flattened dummy batch.
            else:
                # Range of indices on time-axis, e.g. "-50:-1".
                if isinstance(view_req.space, gym.spaces.Space):
                    time_size = (
                        len(view_req.shift_arr) if len(view_req.shift_arr) > 1 else None
                    )
                    ret[view_col] = get_dummy_batch_for_space(
                        view_req.space, batch_size=batch_size, time_size=time_size
                    )
                else:
                    ret[view_col] = [view_req.space for _ in range(batch_size)]

        # Due to different view requirements for the different columns,
        # columns in the resulting batch may not all have the same batch size.
        return SampleBatch(ret)
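
    # Sketch (not part of the original source): for a policy with
    # Box(-1, 1, (4,)) observations, Discrete(2) actions, and the default view
    # requirements, the returned SampleBatch would roughly contain all-zero
    # columns such as
    #
    #     {"obs": np.zeros((batch_size, 4), np.float32),
    #      "actions": np.zeros((batch_size,)),       # dtype from the action space
    #      "rewards": np.zeros((batch_size,), np.float32), ...}
    #
    # Exact dtypes/shapes come from get_dummy_batch_for_space() and the
    # configured preprocessor/flattening settings.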

    def _update_model_view_requirements_from_init_state(self):
        """Uses the Model's (or this Policy's) init state to add needed ViewReqs.

        Can be called from within a Policy to make sure RNNs automatically
        update their internal state-related view requirements.
        Changes the `self.view_requirements` dict.
        """
        self._model_init_state_automatically_added = True
        model = getattr(self, "model", None)

        obj = model or self
        if model and not hasattr(model, "view_requirements"):
            model.view_requirements = {
                SampleBatch.OBS: ViewRequirement(space=self.observation_space)
            }
        view_reqs = obj.view_requirements
        # Add state-ins to this model's view.
        init_state = []
        if hasattr(obj, "get_initial_state") and callable(obj.get_initial_state):
            init_state = obj.get_initial_state()
        else:
            # Add this functionality automatically for new native model API.
            if (
                tf
                and isinstance(model, tf.keras.Model)
                and "state_in_0" not in view_reqs
            ):
                obj.get_initial_state = lambda: [
                    np.zeros_like(view_req.space.sample())
                    for k, view_req in model.view_requirements.items()
                    if k.startswith("state_in_")
                ]
            else:
                obj.get_initial_state = lambda: []
                if "state_in_0" in view_reqs:
                    self.is_recurrent = lambda: True

        # Make sure auto-generated init-state view requirements get added
        # to both Policy and Model, no matter what.
        view_reqs = [view_reqs] + (
            [self.view_requirements] if hasattr(self, "view_requirements") else []
        )

        for i, state in enumerate(init_state):
            # Allow `state` to be either a Space (use zeros as initial values)
            # or any value (e.g. a dict or a non-zero tensor).
            fw = (
                np
                if isinstance(state, np.ndarray)
                else torch
                if torch and torch.is_tensor(state)
                else None
            )
            if fw:
                space = (
                    Box(-1.0, 1.0, shape=state.shape) if fw.all(state == 0.0) else state
                )
            else:
                space = state
            for vr in view_reqs:
                # Only override if the user has not already provided
                # custom view-requirements for state_in_n.
                if "state_in_{}".format(i) not in vr:
                    vr["state_in_{}".format(i)] = ViewRequirement(
                        "state_out_{}".format(i),
                        shift=-1,
                        used_for_compute_actions=True,
                        batch_repeat_value=self.config.get("model", {}).get(
                            "max_seq_len", 1
                        ),
                        space=space,
                    )
                # Only override if the user has not already provided
                # custom view-requirements for state_out_n.
                if "state_out_{}".format(i) not in vr:
                    vr["state_out_{}".format(i)] = ViewRequirement(
                        space=space, used_for_training=True
                    )
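
    # Illustration (not part of the original source): for an RNN model whose
    # get_initial_state() returns two zero tensors of shape (256,), the loop
    # above adds, for i in {0, 1}:
    #
    #     view_requirements["state_in_{i}"]  -> views "state_out_{i}" at
    #         shift=-1, batch_repeat_value=model_config["max_seq_len"],
    #         space=Box(-1, 1, (256,))
    #     view_requirements["state_out_{i}"] -> space=Box(-1, 1, (256,)),
    #         used_for_training=True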

    @DeveloperAPI
    def __repr__(self):
        return type(self).__name__

    @Deprecated(new="get_exploration_state", error=True)
    def get_exploration_info(self) -> Dict[str, TensorType]:
        return self.get_exploration_state()


@DeveloperAPI
def get_gym_space_from_struct_of_tensors(
    value: Union[Mapping, Tuple, List, TensorType],
    batched_input=True,
) -> gym.Space:
    start_idx = 1 if batched_input else 0
    struct = tree.map_structure(
        lambda x: gym.spaces.Box(
            -1.0, 1.0, shape=x.shape[start_idx:], dtype=get_np_dtype(x)
        ),
        value,
    )
    space = get_gym_space_from_struct_of_spaces(struct)
    return space
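
# Sketch (not part of the original source): a batched struct of tensors such as
#
#     {"mean": np.zeros((32, 4), np.float32),
#      "log_std": np.zeros((32, 4), np.float32)}
#
# maps (with batched_input=True, so the leading 32 is dropped) to
# gym.spaces.Dict({"mean": Box(-1, 1, (4,), float32),
#                  "log_std": Box(-1, 1, (4,), float32)}).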


@DeveloperAPI
def get_gym_space_from_struct_of_spaces(value: Union[Dict, Tuple]) -> gym.spaces.Dict:
    if isinstance(value, Mapping):
        return gym.spaces.Dict(
            {k: get_gym_space_from_struct_of_spaces(v) for k, v in value.items()}
        )
    elif isinstance(value, (tuple, list)):
        return gym.spaces.Tuple(
            [get_gym_space_from_struct_of_spaces(v) for v in value]
        )
    else:
        assert isinstance(value, gym.spaces.Space), (
            f"The struct of spaces should only contain dicts, tuples, and primitive "
            f"gym spaces. Space is of type {type(value)}"
        )
        return value
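
# Sketch (not part of the original source): nested containers are converted
# recursively, e.g.
#
#     {"a": Box(-1, 1, (2,)), "b": (Discrete(3), Discrete(5))}
#
# becomes gym.spaces.Dict({"a": Box(...),
#                          "b": gym.spaces.Tuple((Discrete(3), Discrete(5)))});
# anything that is already a gym.spaces.Space is returned unchanged.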