# dynamic_tf_policy.py

from collections import namedtuple, OrderedDict
import gym
import logging
import re
from typing import Callable, Dict, List, Optional, Tuple, Type

from ray.util.debug import log_once
from ray.rllib.models.tf.tf_action_dist import TFActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.policy.view_requirement import ViewRequirement
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils import force_list
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.debug import summarize
from ray.rllib.utils.deprecation import deprecation_warning, DEPRECATED_VALUE
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.spaces.space_utils import get_dummy_batch_for_space
from ray.rllib.utils.tf_utils import get_placeholder
from ray.rllib.utils.typing import LocalOptimizer, ModelGradients, \
    TensorType, TrainerConfigDict

tf1, tf, tfv = try_import_tf()

logger = logging.getLogger(__name__)

# Variable scope in which created variables will be placed under.
TOWER_SCOPE_NAME = "tower"


@DeveloperAPI
class DynamicTFPolicy(TFPolicy):
    """A TFPolicy that auto-defines placeholders dynamically at runtime.

    Do not sub-class this class directly (neither should you sub-class
    TFPolicy), but rather use rllib.policy.tf_policy_template.build_tf_policy
    to generate your custom tf (graph-mode or eager) Policy classes.
    """

    @DeveloperAPI
    def __init__(
            self,
            obs_space: gym.spaces.Space,
            action_space: gym.spaces.Space,
            config: TrainerConfigDict,
            loss_fn: Callable[[
                Policy, ModelV2, Type[TFActionDistribution], SampleBatch
            ], TensorType],
            *,
            stats_fn: Optional[Callable[[Policy, SampleBatch], Dict[
                str, TensorType]]] = None,
            grad_stats_fn: Optional[Callable[[
                Policy, SampleBatch, ModelGradients
            ], Dict[str, TensorType]]] = None,
            before_loss_init: Optional[Callable[[
                Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
            ], None]] = None,
            make_model: Optional[Callable[[
                Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
            ], ModelV2]] = None,
            action_sampler_fn: Optional[Callable[[
                TensorType, List[TensorType]
            ], Tuple[TensorType, TensorType]]] = None,
            action_distribution_fn: Optional[Callable[[
                Policy, ModelV2, TensorType, TensorType, TensorType
            ], Tuple[TensorType, type, List[TensorType]]]] = None,
            existing_inputs: Optional[Dict[str, "tf1.placeholder"]] = None,
            existing_model: Optional[ModelV2] = None,
            get_batch_divisibility_req: Optional[Callable[[Policy],
                                                          int]] = None,
            obs_include_prev_action_reward=DEPRECATED_VALUE):
        """Initializes a DynamicTFPolicy instance.

        Initialization of this class occurs in two phases and defines the
        static graph.

        Phase 1: The model is created and model variables are initialized.

        Phase 2: A fake batch of data is created, sent to the trajectory
        postprocessor, and then used to create placeholders for the loss
        function. The loss and stats functions are initialized with these
        placeholders.

        Args:
            obs_space: Observation space of the policy.
            action_space: Action space of the policy.
            config: Policy-specific configuration data.
            loss_fn: Function that returns a loss tensor for the policy graph.
            stats_fn: Optional callable that - given the policy and batch
                input tensors - returns a dict mapping str to TF ops.
                These ops are fetched from the graph after loss calculations
                and the resulting values can be found in the results dict
                returned by e.g. `Trainer.train()` or in tensorboard (if TB
                logging is enabled).
            grad_stats_fn: Optional callable that - given the policy, batch
                input tensors, and calculated loss gradient tensors - returns
                a dict mapping str to TF ops. These ops are fetched from the
                graph after loss and gradient calculations and the resulting
                values can be found in the results dict returned by e.g.
                `Trainer.train()` or in tensorboard (if TB logging is
                enabled).
            before_loss_init: Optional function to run prior to
                loss init that takes the same arguments as __init__.
            make_model: Optional function that returns a ModelV2 object
                given policy, obs_space, action_space, and policy config.
                All policy variables should be created in this function.
                If not specified, a default model will be created.
            action_sampler_fn: A callable returning a sampled action and its
                log-likelihood given Policy, ModelV2, observation inputs,
                explore, and is_training.
                Provide `action_sampler_fn` if you would like to have full
                control over the action computation step, including the
                model forward pass, possible sampling from a distribution,
                and exploration logic.
                Note: If `action_sampler_fn` is given,
                `action_distribution_fn` must be None. If both
                `action_sampler_fn` and `action_distribution_fn` are None,
                RLlib will simply pass inputs through `self.model` to get
                distribution inputs, create the distribution object, sample
                from it, and apply some exploration logic to the results.
                The callable takes as inputs: Policy, ModelV2, obs_batch,
                state_batches (optional), seq_lens (optional),
                prev_actions_batch (optional), prev_rewards_batch (optional),
                explore, and is_training.
            action_distribution_fn: A callable returning distribution inputs
                (parameters), a dist-class to generate an action distribution
                object from, and internal-state outputs (or an empty list if
                not applicable).
                Provide `action_distribution_fn` if you would like to only
                customize the model forward pass call. The resulting
                distribution parameters are then used by RLlib to create a
                distribution object, sample from it, and execute any
                exploration logic.
                Note: If `action_distribution_fn` is given,
                `action_sampler_fn` must be None. If both
                `action_sampler_fn` and `action_distribution_fn` are None,
                RLlib will simply pass inputs through `self.model` to get
                distribution inputs, create the distribution object, sample
                from it, and apply some exploration logic to the results.
                The callable takes as inputs: Policy, ModelV2, input_dict,
                explore, timestep, is_training.
            existing_inputs: When copying a policy, this specifies an existing
                dict of placeholders to use instead of defining new ones.
            existing_model: When copying a policy, this specifies an existing
                model to clone and share weights with.
            get_batch_divisibility_req: Optional callable that returns the
                divisibility requirement for sample batches. If None, will
                assume a value of 1.
        """
        if obs_include_prev_action_reward != DEPRECATED_VALUE:
            deprecation_warning(
                old="obs_include_prev_action_reward", error=False)
        self.observation_space = obs_space
        self.action_space = action_space
        self.config = config
        self.framework = "tf"
        self._loss_fn = loss_fn
        self._stats_fn = stats_fn
        self._grad_stats_fn = grad_stats_fn
        self._seq_lens = None
        self._is_tower = existing_inputs is not None

        dist_class = None
        if action_sampler_fn or action_distribution_fn:
            if not make_model:
                raise ValueError(
                    "`make_model` is required if `action_sampler_fn` OR "
                    "`action_distribution_fn` is given")
        else:
            dist_class, logit_dim = ModelCatalog.get_action_dist(
                action_space, self.config["model"])

        # Setup self.model.
        if existing_model:
            if isinstance(existing_model, list):
                self.model = existing_model[0]
                # TODO: (sven) hack, but works for `target_[q_]?model`.
                for i in range(1, len(existing_model)):
                    setattr(self, existing_model[i][0], existing_model[i][1])
        elif make_model:
            self.model = make_model(self, obs_space, action_space, config)
        else:
            self.model = ModelCatalog.get_model_v2(
                obs_space=obs_space,
                action_space=action_space,
                num_outputs=logit_dim,
                model_config=self.config["model"],
                framework="tf")

        # Auto-update model's inference view requirements, if recurrent.
        self._update_model_view_requirements_from_init_state()

        # Input placeholders already given -> Use these.
        if existing_inputs:
            self._state_inputs = [
                v for k, v in existing_inputs.items()
                if k.startswith("state_in_")
            ]
            # Placeholder for RNN time-chunk valid lengths.
            if self._state_inputs:
                self._seq_lens = existing_inputs[SampleBatch.SEQ_LENS]
        # Create new input placeholders.
        else:
            self._state_inputs = [
                get_placeholder(
                    space=vr.space,
                    time_axis=not isinstance(vr.shift, int),
                    name=k,
                ) for k, vr in self.model.view_requirements.items()
                if k.startswith("state_in_")
            ]
            # Placeholder for RNN time-chunk valid lengths.
            if self._state_inputs:
                self._seq_lens = tf1.placeholder(
                    dtype=tf.int32, shape=[None], name="seq_lens")

        # Use default settings.
        # Add NEXT_OBS, STATE_IN_0.., and others.
        self.view_requirements = self._get_default_view_requirements()
        # Combine view_requirements for Model and Policy.
        self.view_requirements.update(self.model.view_requirements)
        # Disable env-info placeholder.
        if SampleBatch.INFOS in self.view_requirements:
            self.view_requirements[SampleBatch.INFOS].used_for_training = False

        # Setup standard placeholders.
        if self._is_tower:
            timestep = existing_inputs["timestep"]
            explore = False
            self._input_dict, self._dummy_batch = \
                self._get_input_dict_and_dummy_batch(
                    self.view_requirements, existing_inputs)
        else:
            if not self.config.get("_disable_action_flattening"):
                action_ph = ModelCatalog.get_action_placeholder(action_space)
                prev_action_ph = {}
                if SampleBatch.PREV_ACTIONS not in self.view_requirements:
                    prev_action_ph = {
                        SampleBatch.PREV_ACTIONS: ModelCatalog.
                        get_action_placeholder(action_space, "prev_action")
                    }
                self._input_dict, self._dummy_batch = \
                    self._get_input_dict_and_dummy_batch(
                        self.view_requirements,
                        dict({SampleBatch.ACTIONS: action_ph},
                             **prev_action_ph))
            else:
                self._input_dict, self._dummy_batch = \
                    self._get_input_dict_and_dummy_batch(
                        self.view_requirements, {})
            # Placeholder for (sampling steps) timestep (int).
            timestep = tf1.placeholder_with_default(
                tf.zeros((), dtype=tf.int64), (), name="timestep")
            # Placeholder for `is_exploring` flag.
            explore = tf1.placeholder_with_default(
                True, (), name="is_exploring")

        # Placeholder for `is_training` flag.
        self._input_dict.set_training(self._get_is_training_placeholder())

        # Multi-GPU towers do not need any action computing/exploration
        # graphs.
        sampled_action = None
        sampled_action_logp = None
        dist_inputs = None
        extra_action_fetches = {}
        self._state_out = None
        if not self._is_tower:
            # Create the Exploration object to use for this Policy.
            self.exploration = self._create_exploration()

            # Fully customized action generation (e.g., custom policy).
            if action_sampler_fn:
                sampled_action, sampled_action_logp = action_sampler_fn(
                    self,
                    self.model,
                    obs_batch=self._input_dict[SampleBatch.CUR_OBS],
                    state_batches=self._state_inputs,
                    seq_lens=self._seq_lens,
                    prev_action_batch=self._input_dict.get(
                        SampleBatch.PREV_ACTIONS),
                    prev_reward_batch=self._input_dict.get(
                        SampleBatch.PREV_REWARDS),
                    explore=explore,
                    is_training=self._input_dict.is_training)
            # Distribution generation is customized, e.g., DQN, DDPG.
            else:
                if action_distribution_fn:
                    # Try new action_distribution_fn signature, supporting
                    # state_batches and seq_lens.
                    in_dict = self._input_dict
                    try:
                        dist_inputs, dist_class, self._state_out = \
                            action_distribution_fn(
                                self,
                                self.model,
                                input_dict=in_dict,
                                state_batches=self._state_inputs,
                                seq_lens=self._seq_lens,
                                explore=explore,
                                timestep=timestep,
                                is_training=in_dict.is_training)
                    # Trying the old way (to stay backward compatible).
                    # TODO: Remove in future.
                    except TypeError as e:
                        if "positional argument" in e.args[0] or \
                                "unexpected keyword argument" in e.args[0]:
                            dist_inputs, dist_class, self._state_out = \
                                action_distribution_fn(
                                    self, self.model,
                                    obs_batch=in_dict[SampleBatch.CUR_OBS],
                                    state_batches=self._state_inputs,
                                    seq_lens=self._seq_lens,
                                    prev_action_batch=in_dict.get(
                                        SampleBatch.PREV_ACTIONS),
                                    prev_reward_batch=in_dict.get(
                                        SampleBatch.PREV_REWARDS),
                                    explore=explore,
                                    is_training=in_dict.is_training)
                        else:
                            raise e

                # Default distribution generation behavior:
                # Pass through model. E.g., PG, PPO.
                else:
                    if isinstance(self.model, tf.keras.Model):
                        dist_inputs, self._state_out, \
                            extra_action_fetches = \
                            self.model(self._input_dict)
                    else:
                        dist_inputs, self._state_out = self.model(
                            self._input_dict)

                action_dist = dist_class(dist_inputs, self.model)

                # Using exploration to get final action (e.g. via sampling).
                sampled_action, sampled_action_logp = \
                    self.exploration.get_exploration_action(
                        action_distribution=action_dist,
                        timestep=timestep,
                        explore=explore)

        if dist_inputs is not None:
            extra_action_fetches[SampleBatch.ACTION_DIST_INPUTS] = dist_inputs

        if sampled_action_logp is not None:
            extra_action_fetches[SampleBatch.ACTION_LOGP] = sampled_action_logp
            extra_action_fetches[SampleBatch.ACTION_PROB] = \
                tf.exp(tf.cast(sampled_action_logp, tf.float32))

        # Phase 1 init.
        sess = tf1.get_default_session() or tf1.Session(
            config=tf1.ConfigProto(**self.config["tf_session_args"]))

        batch_divisibility_req = get_batch_divisibility_req(self) if \
            callable(get_batch_divisibility_req) else \
            (get_batch_divisibility_req or 1)

        prev_action_input = self._input_dict[SampleBatch.PREV_ACTIONS] if \
            SampleBatch.PREV_ACTIONS in self._input_dict.accessed_keys \
            else None
        prev_reward_input = self._input_dict[SampleBatch.PREV_REWARDS] if \
            SampleBatch.PREV_REWARDS in self._input_dict.accessed_keys \
            else None

        super().__init__(
            observation_space=obs_space,
            action_space=action_space,
            config=config,
            sess=sess,
            obs_input=self._input_dict[SampleBatch.OBS],
            action_input=self._input_dict[SampleBatch.ACTIONS],
            sampled_action=sampled_action,
            sampled_action_logp=sampled_action_logp,
            dist_inputs=dist_inputs,
            dist_class=dist_class,
            loss=None,  # dynamically initialized on run
            loss_inputs=[],
            model=self.model,
            state_inputs=self._state_inputs,
            state_outputs=self._state_out,
            prev_action_input=prev_action_input,
            prev_reward_input=prev_reward_input,
            seq_lens=self._seq_lens,
            max_seq_len=config["model"]["max_seq_len"],
            batch_divisibility_req=batch_divisibility_req,
            explore=explore,
            timestep=timestep)

        # Phase 2 init.
        if before_loss_init is not None:
            before_loss_init(self, obs_space, action_space, config)

        if hasattr(self, "_extra_action_fetches"):
            self._extra_action_fetches.update(extra_action_fetches)
        else:
            self._extra_action_fetches = extra_action_fetches

        # Loss initialization and model/postprocessing test calls.
        if not self._is_tower:
            self._initialize_loss_from_dummy_batch(
                auto_remove_unneeded_view_reqs=True)

            # Create MultiGPUTowerStacks, if we have at least one actual
            # GPU or >1 CPUs (fake GPUs).
            if len(self.devices) > 1 or any("gpu" in d for d in self.devices):
                # Per-GPU graph copies created here must share vars with the
                # policy. Therefore, `reuse` is set to tf1.AUTO_REUSE because
                # Adam nodes are created after all of the device copies are
                # created.
                with tf1.variable_scope("", reuse=tf1.AUTO_REUSE):
                    self.multi_gpu_tower_stacks = [
                        TFMultiGPUTowerStack(policy=self) for i in range(
                            self.config.get("num_multi_gpu_tower_stacks", 1))
                    ]

            # Initialize again after loss and tower init.
            self.get_session().run(tf1.global_variables_initializer())

    @override(TFPolicy)
    @DeveloperAPI
    def copy(self,
             existing_inputs: List[Tuple[str, "tf1.placeholder"]]) -> TFPolicy:
        """Creates a copy of self using existing input placeholders."""

        # Note that there might be RNN state inputs at the end of the list
        if len(self._loss_input_dict) != len(existing_inputs):
            raise ValueError("Tensor list mismatch", self._loss_input_dict,
                             self._state_inputs, existing_inputs)
        for i, (k, v) in enumerate(self._loss_input_dict_no_rnn.items()):
            if v.shape.as_list() != existing_inputs[i].shape.as_list():
                raise ValueError("Tensor shape mismatch", i, k, v.shape,
                                 existing_inputs[i].shape)

        # By convention, the loss inputs are followed by state inputs and then
        # the seq len tensor.
        rnn_inputs = []
        for i in range(len(self._state_inputs)):
            rnn_inputs.append(
                ("state_in_{}".format(i),
                 existing_inputs[len(self._loss_input_dict_no_rnn) + i]))
        if rnn_inputs:
            rnn_inputs.append((SampleBatch.SEQ_LENS, existing_inputs[-1]))

        input_dict = OrderedDict(
            [("is_exploring", self._is_exploring), ("timestep",
                                                    self._timestep)] +
            [(k, existing_inputs[i])
             for i, k in enumerate(self._loss_input_dict_no_rnn.keys())] +
            rnn_inputs)

        instance = self.__class__(
            self.observation_space,
            self.action_space,
            self.config,
            existing_inputs=input_dict,
            existing_model=[
                self.model,
                # Deprecated: Target models should all reside under
                # `policy.target_model` now.
                ("target_q_model", getattr(self, "target_q_model", None)),
                ("target_model", getattr(self, "target_model", None)),
            ])

        instance._loss_input_dict = input_dict
        losses = instance._do_loss_init(SampleBatch(input_dict))
        loss_inputs = [
            (k, existing_inputs[i])
            for i, k in enumerate(self._loss_input_dict_no_rnn.keys())
        ]

        TFPolicy._initialize_loss(instance, losses, loss_inputs)
        if instance._grad_stats_fn:
            instance._stats_fetches.update(
                instance._grad_stats_fn(instance, input_dict,
                                        instance._grads))
        return instance

    @override(Policy)
    @DeveloperAPI
    def get_initial_state(self) -> List[TensorType]:
        if self.model:
            return self.model.get_initial_state()
        else:
            return []

    @override(Policy)
    @DeveloperAPI
    def load_batch_into_buffer(
            self,
            batch: SampleBatch,
            buffer_index: int = 0,
    ) -> int:
        # Set the is_training flag of the batch.
        batch.set_training(True)

        # Shortcut for 1 CPU only: Store batch in
        # `self._loaded_single_cpu_batch`.
        if len(self.devices) == 1 and self.devices[0] == "/cpu:0":
            assert buffer_index == 0
            self._loaded_single_cpu_batch = batch
            return len(batch)

        input_dict = self._get_loss_inputs_dict(batch, shuffle=False)
        data_keys = list(self._loss_input_dict_no_rnn.values())
        if self._state_inputs:
            state_keys = self._state_inputs + [self._seq_lens]
        else:
            state_keys = []
        inputs = [input_dict[k] for k in data_keys]
        state_inputs = [input_dict[k] for k in state_keys]

        return self.multi_gpu_tower_stacks[buffer_index].load_data(
            sess=self.get_session(),
            inputs=inputs,
            state_inputs=state_inputs,
        )

    @override(Policy)
    @DeveloperAPI
    def get_num_samples_loaded_into_buffer(self, buffer_index: int = 0) -> int:
        # Shortcut for 1 CPU only: Batch should already be stored in
        # `self._loaded_single_cpu_batch`.
        if len(self.devices) == 1 and self.devices[0] == "/cpu:0":
            assert buffer_index == 0
            return len(self._loaded_single_cpu_batch) if \
                self._loaded_single_cpu_batch is not None else 0

        return self.multi_gpu_tower_stacks[buffer_index].num_tuples_loaded

    @override(Policy)
    @DeveloperAPI
    def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0):
        # Shortcut for 1 CPU only: Batch should already be stored in
        # `self._loaded_single_cpu_batch`.
        if len(self.devices) == 1 and self.devices[0] == "/cpu:0":
            assert buffer_index == 0
            if self._loaded_single_cpu_batch is None:
                raise ValueError(
                    "Must call Policy.load_batch_into_buffer() before "
                    "Policy.learn_on_loaded_batch()!")
            # Get the correct slice of the already loaded batch to use,
            # based on offset and batch size.
            batch_size = self.config.get("sgd_minibatch_size",
                                         self.config["train_batch_size"])
            if batch_size >= len(self._loaded_single_cpu_batch):
                sliced_batch = self._loaded_single_cpu_batch
            else:
                sliced_batch = self._loaded_single_cpu_batch.slice(
                    start=offset, end=offset + batch_size)
            return self.learn_on_batch(sliced_batch)

        return self.multi_gpu_tower_stacks[buffer_index].optimize(
            self.get_session(), offset)
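
    # Usage sketch (assumption, not part of the original source): the three
    # buffer methods above are typically driven together for minibatch SGD
    # over a pre-loaded batch, roughly like this (`policy` and `train_batch`
    # are placeholder names):
    #
    #   policy.load_batch_into_buffer(train_batch, buffer_index=0)
    #   n = policy.get_num_samples_loaded_into_buffer(buffer_index=0)
    #   minibatch = policy.config.get("sgd_minibatch_size",
    #                                 policy.config["train_batch_size"])
    #   for offset in range(0, n, minibatch):
    #       results = policy.learn_on_loaded_batch(offset, buffer_index=0)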

    def _get_input_dict_and_dummy_batch(self, view_requirements,
                                        existing_inputs):
        """Creates input_dict and dummy_batch for loss initialization.

        Used for managing the Policy's input placeholders and for loss
        initialization.
        input_dict: str -> tf placeholders, dummy_batch: str -> np.arrays.

        Args:
            view_requirements (ViewReqs): The view requirements dict.
            existing_inputs (Dict[str, tf.placeholder]): A dict of already
                existing placeholders.

        Returns:
            Tuple[Dict[str, tf.placeholder], Dict[str, np.ndarray]]: The
                input_dict/dummy_batch tuple.
        """
        input_dict = {}
        for view_col, view_req in view_requirements.items():
            # Point state_in to the already existing self._state_inputs.
            mo = re.match(r"state_in_(\d+)", view_col)
            if mo is not None:
                input_dict[view_col] = self._state_inputs[int(mo.group(1))]
            # State-outs (no placeholders needed).
            elif view_col.startswith("state_out_"):
                continue
            # Skip action dist inputs placeholder (do later).
            elif view_col == SampleBatch.ACTION_DIST_INPUTS:
                continue
            # This is a tower: Input placeholders already exist.
            elif view_col in existing_inputs:
                input_dict[view_col] = existing_inputs[view_col]
            # All others.
            else:
                time_axis = not isinstance(view_req.shift, int)
                if view_req.used_for_training:
                    # Create a +time-axis placeholder if the shift is not an
                    # int (range or list of ints).
                    # Do not flatten actions if action flattening disabled.
                    if self.config.get("_disable_action_flattening") and \
                            view_col in [SampleBatch.ACTIONS,
                                         SampleBatch.PREV_ACTIONS]:
                        flatten = False
                    # Do not flatten observations if no preprocessor API used.
                    elif view_col in [SampleBatch.OBS, SampleBatch.NEXT_OBS] \
                            and self.config["_disable_preprocessor_api"]:
                        flatten = False
                    # Flatten everything else.
                    else:
                        flatten = True
                    input_dict[view_col] = get_placeholder(
                        space=view_req.space,
                        name=view_col,
                        time_axis=time_axis,
                        flatten=flatten,
                    )
        dummy_batch = self._get_dummy_batch_from_view_requirements(
            batch_size=32)

        return SampleBatch(input_dict, seq_lens=self._seq_lens), dummy_batch

    @override(Policy)
    def _initialize_loss_from_dummy_batch(
            self, auto_remove_unneeded_view_reqs: bool = True,
            stats_fn=None) -> None:
        # Create the optimizer/exploration optimizer here. Some initialization
        # steps (e.g. exploration postprocessing) may need this.
        if not self._optimizers:
            self._optimizers = force_list(self.optimizer())
            # Backward compatibility.
            self._optimizer = self._optimizers[0]

        # Test calls depend on variable init, so initialize model first.
        self.get_session().run(tf1.global_variables_initializer())

        # Fields that have not been accessed are not needed for action
        # computations -> Tag them as `used_for_compute_actions=False`.
        for key, view_req in self.view_requirements.items():
            if not key.startswith("state_in_") and \
                    key not in self._input_dict.accessed_keys:
                view_req.used_for_compute_actions = False
        for key, value in self._extra_action_fetches.items():
            self._dummy_batch[key] = get_dummy_batch_for_space(
                gym.spaces.Box(
                    -1.0,
                    1.0,
                    shape=value.shape.as_list()[1:],
                    dtype=value.dtype.name),
                batch_size=len(self._dummy_batch),
            )
            self._input_dict[key] = get_placeholder(value=value, name=key)
            if key not in self.view_requirements:
                logger.info("Adding extra-action-fetch `{}` to "
                            "view-reqs.".format(key))
                self.view_requirements[key] = ViewRequirement(
                    space=gym.spaces.Box(
                        -1.0,
                        1.0,
                        shape=value.shape[1:],
                        dtype=value.dtype.name),
                    used_for_compute_actions=False,
                )
        dummy_batch = self._dummy_batch

        logger.info("Testing `postprocess_trajectory` w/ dummy batch.")
        self.exploration.postprocess_trajectory(self, dummy_batch,
                                                self.get_session())
        _ = self.postprocess_trajectory(dummy_batch)
        # Add new columns automatically to (loss) input_dict.
        for key in dummy_batch.added_keys:
            if key not in self._input_dict:
                self._input_dict[key] = get_placeholder(
                    value=dummy_batch[key], name=key)
            if key not in self.view_requirements:
                self.view_requirements[key] = ViewRequirement(
                    space=gym.spaces.Box(
                        -1.0,
                        1.0,
                        shape=dummy_batch[key].shape[1:],
                        dtype=dummy_batch[key].dtype),
                    used_for_compute_actions=False,
                )

        train_batch = SampleBatch(
            dict(self._input_dict, **self._loss_input_dict),
            _is_training=True,
        )

        if self._state_inputs:
            train_batch[SampleBatch.SEQ_LENS] = self._seq_lens
            self._loss_input_dict.update({
                SampleBatch.SEQ_LENS: train_batch[SampleBatch.SEQ_LENS]
            })

        self._loss_input_dict.update({k: v for k, v in train_batch.items()})

        if log_once("loss_init"):
            logger.debug(
                "Initializing loss function with dummy input:\n\n{}\n".format(
                    summarize(train_batch)))

        losses = self._do_loss_init(train_batch)

        all_accessed_keys = \
            train_batch.accessed_keys | dummy_batch.accessed_keys | \
            dummy_batch.added_keys | set(
                self.model.view_requirements.keys())

        TFPolicy._initialize_loss(self, losses, [
            (k, v) for k, v in train_batch.items() if k in all_accessed_keys
        ] + ([(SampleBatch.SEQ_LENS, train_batch[SampleBatch.SEQ_LENS])]
             if SampleBatch.SEQ_LENS in train_batch else []))

        if "is_training" in self._loss_input_dict:
            del self._loss_input_dict["is_training"]

        # Call the grads stats fn.
        # TODO: (sven) rename to simply stats_fn to match eager and torch.
        if self._grad_stats_fn:
            self._stats_fetches.update(
                self._grad_stats_fn(self, train_batch, self._grads))

        # Add new columns automatically to view-reqs.
        if auto_remove_unneeded_view_reqs:
            # Add those needed for postprocessing and training.
            all_accessed_keys = train_batch.accessed_keys | \
                dummy_batch.accessed_keys
            # Tag those only needed for post-processing (with some
            # exceptions).
            for key in dummy_batch.accessed_keys:
                if key not in train_batch.accessed_keys and \
                        key not in self.model.view_requirements and \
                        key not in [
                            SampleBatch.EPS_ID, SampleBatch.AGENT_INDEX,
                            SampleBatch.UNROLL_ID, SampleBatch.DONES,
                            SampleBatch.REWARDS, SampleBatch.INFOS,
                            SampleBatch.OBS_EMBEDS]:
                    if key in self.view_requirements:
                        self.view_requirements[key].used_for_training = False
                    if key in self._loss_input_dict:
                        del self._loss_input_dict[key]
            # Remove those not needed at all (leave those that are needed
            # by Sampler to properly execute sample collection).
            # Also always leave DONES, REWARDS, and INFOS, no matter what.
            for key in list(self.view_requirements.keys()):
                if key not in all_accessed_keys and key not in [
                        SampleBatch.EPS_ID, SampleBatch.AGENT_INDEX,
                        SampleBatch.UNROLL_ID, SampleBatch.DONES,
                        SampleBatch.REWARDS, SampleBatch.INFOS] and \
                        key not in self.model.view_requirements:
                    # If user deleted this key manually in postprocessing
                    # fn, warn about it and do not remove from
                    # view-requirements.
                    if key in dummy_batch.deleted_keys:
                        logger.warning(
                            "SampleBatch key '{}' was deleted manually in "
                            "postprocessing function! RLlib will "
                            "automatically remove non-used items from the "
                            "data stream. Remove the `del` from your "
                            "postprocessing function.".format(key))
                    # If we are not writing output to disk, safe to erase
                    # this key to save space in the sample batch.
                    elif self.config["output"] is None:
                        del self.view_requirements[key]
                    if key in self._loss_input_dict:
                        del self._loss_input_dict[key]
            # Add those data_cols (again) that are missing and have
            # dependencies by view_cols.
            for key in list(self.view_requirements.keys()):
                vr = self.view_requirements[key]
                if (vr.data_col is not None
                        and vr.data_col not in self.view_requirements):
                    used_for_training = \
                        vr.data_col in train_batch.accessed_keys
                    self.view_requirements[vr.data_col] = ViewRequirement(
                        space=vr.space, used_for_training=used_for_training)

        self._loss_input_dict_no_rnn = {
            k: v
            for k, v in self._loss_input_dict.items()
            if (v not in self._state_inputs and v != self._seq_lens)
        }

    def _do_loss_init(self, train_batch: SampleBatch):
        losses = self._loss_fn(self, self.model, self.dist_class, train_batch)
        losses = force_list(losses)
        if self._stats_fn:
            self._stats_fetches.update(self._stats_fn(self, train_batch))
        # Override the update ops to be those of the model.
        self._update_ops = []
        if not isinstance(self.model, tf.keras.Model):
            self._update_ops = self.model.update_ops()
        return losses


class TFMultiGPUTowerStack:
    """Optimizer that runs in parallel across multiple local devices.

    TFMultiGPUTowerStack automatically splits up and loads training data
    onto specified local devices (e.g. GPUs) with `load_data()`. During a
    call to `optimize()`, the devices compute gradients over slices of the
    data in parallel. The gradients are then averaged and applied to the
    shared weights.

    The data loaded is pinned in device memory until the next call to
    `load_data`, so you can make multiple passes (possibly in randomized
    order) over the same data once loaded.

    This is similar to tf1.train.SyncReplicasOptimizer, but works within a
    single TensorFlow graph, i.e. implements in-graph replicated training:
    https://www.tensorflow.org/api_docs/python/tf/train/SyncReplicasOptimizer
    """

    def __init__(
            self,
            # Deprecated.
            optimizer=None,
            devices=None,
            input_placeholders=None,
            rnn_inputs=None,
            max_per_device_batch_size=None,
            build_graph=None,
            grad_norm_clipping=None,
            # Use only `policy` argument from here on.
            policy: TFPolicy = None,
    ):
        """Initializes a TFMultiGPUTowerStack instance.

        Args:
            policy (TFPolicy): The TFPolicy object that this tower stack
                belongs to.
        """
        # Obsoleted usage, use only `policy` arg from here on.
        if policy is None:
            deprecation_warning(
                old="TFMultiGPUTowerStack(...)",
                new="TFMultiGPUTowerStack(policy=[Policy])",
                error=False,
            )
            self.policy = None
            self.optimizers = optimizer
            self.devices = devices
            self.max_per_device_batch_size = max_per_device_batch_size
            self.policy_copy = build_graph
        else:
            self.policy: TFPolicy = policy
            self.optimizers: List[LocalOptimizer] = self.policy._optimizers
            self.devices = self.policy.devices
            self.max_per_device_batch_size = \
                (max_per_device_batch_size or
                 policy.config.get("sgd_minibatch_size", policy.config.get(
                     "train_batch_size", 999999))) // len(self.devices)
            input_placeholders = list(
                self.policy._loss_input_dict_no_rnn.values())
            rnn_inputs = []
            if self.policy._state_inputs:
                rnn_inputs = self.policy._state_inputs + [
                    self.policy._seq_lens
                ]
            grad_norm_clipping = self.policy.config.get("grad_clip")
            self.policy_copy = self.policy.copy

        assert len(self.devices) > 1 or "gpu" in self.devices[0]
        self.loss_inputs = input_placeholders + rnn_inputs

        shared_ops = tf1.get_collection(
            tf1.GraphKeys.UPDATE_OPS, scope=tf1.get_variable_scope().name)

        # Then setup the per-device loss graphs that use the shared weights
        self._batch_index = tf1.placeholder(tf.int32, name="batch_index")

        # Dynamic batch size, which may be shrunk if there isn't enough data
        self._per_device_batch_size = tf1.placeholder(
            tf.int32, name="per_device_batch_size")
        self._loaded_per_device_batch_size = max_per_device_batch_size

        # When loading RNN input, we dynamically determine the max seq len
        self._max_seq_len = tf1.placeholder(tf.int32, name="max_seq_len")
        self._loaded_max_seq_len = 1

        # Split on the CPU in case the data doesn't fit in GPU memory.
        with tf.device("/cpu:0"):
            data_splits = zip(
                *[tf.split(ph, len(self.devices)) for ph in self.loss_inputs])

        self._towers = []
        for tower_i, (device, device_placeholders) in enumerate(
                zip(self.devices, data_splits)):
            self._towers.append(
                self._setup_device(tower_i, device, device_placeholders,
                                   len(input_placeholders)))

        if self.policy.config["_tf_policy_handles_more_than_one_loss"]:
            avgs = []
            for i, optim in enumerate(self.optimizers):
                avg = average_gradients([t.grads[i] for t in self._towers])
                if grad_norm_clipping:
                    clipped = []
                    for grad, _ in avg:
                        clipped.append(grad)
                    clipped, _ = tf.clip_by_global_norm(
                        clipped, grad_norm_clipping)
                    for i, (grad, var) in enumerate(avg):
                        avg[i] = (clipped[i], var)
                avgs.append(avg)

            # Gather update ops for any batch norm layers.
            # TODO(ekl) here we will use all the ops found which won't work
            # for DQN / DDPG, but those aren't supported with multi-gpu right
            # now anyways.
            self._update_ops = tf1.get_collection(
                tf1.GraphKeys.UPDATE_OPS, scope=tf1.get_variable_scope().name)
            for op in shared_ops:
                self._update_ops.remove(op)  # only care about tower update ops
            if self._update_ops:
                logger.debug("Update ops to run on apply gradient: {}".format(
                    self._update_ops))

            with tf1.control_dependencies(self._update_ops):
                self._train_op = tf.group([
                    o.apply_gradients(a)
                    for o, a in zip(self.optimizers, avgs)
                ])
        else:
            avg = average_gradients([t.grads for t in self._towers])
            if grad_norm_clipping:
                clipped = []
                for grad, _ in avg:
                    clipped.append(grad)
                clipped, _ = tf.clip_by_global_norm(clipped,
                                                    grad_norm_clipping)
                for i, (grad, var) in enumerate(avg):
                    avg[i] = (clipped[i], var)

            # Gather update ops for any batch norm layers.
            # TODO(ekl) here we will use all the ops found which won't work
            # for DQN / DDPG, but those aren't supported with multi-gpu right
            # now anyways.
            self._update_ops = tf1.get_collection(
                tf1.GraphKeys.UPDATE_OPS, scope=tf1.get_variable_scope().name)
            for op in shared_ops:
                self._update_ops.remove(op)  # only care about tower update ops
            if self._update_ops:
                logger.debug("Update ops to run on apply gradient: {}".format(
                    self._update_ops))

            with tf1.control_dependencies(self._update_ops):
                self._train_op = self.optimizers[0].apply_gradients(avg)

    def load_data(self, sess, inputs, state_inputs):
        """Bulk loads the specified inputs into device memory.

        The shape of the inputs must conform to the shapes of the input
        placeholders this optimizer was constructed with.

        The data is split equally across all the devices. If the data is not
        evenly divisible by the batch size, excess data will be discarded.

        Args:
            sess: TensorFlow session.
            inputs: List of arrays matching the input placeholders, of shape
                [BATCH_SIZE, ...].
            state_inputs: List of RNN input arrays. These arrays have size
                [BATCH_SIZE / MAX_SEQ_LEN, ...].

        Returns:
            The number of tuples loaded per device.
        """
        if log_once("load_data"):
            logger.info(
                "Training on concatenated sample batches:\n\n{}\n".format(
                    summarize({
                        "placeholders": self.loss_inputs,
                        "inputs": inputs,
                        "state_inputs": state_inputs
                    })))

        feed_dict = {}
        assert len(self.loss_inputs) == len(inputs + state_inputs), \
            (self.loss_inputs, inputs, state_inputs)

        # Let's suppose we have the following input data, and 2 devices:
        # 1 2 3 4 5 6 7                              <- state inputs shape
        # A A A B B B C C C D D D E E E F F F G G G  <- inputs shape
        # The data is truncated and split across devices as follows:
        # |---| seq len = 3
        # |---------------------------------| seq batch size = 6 seqs
        # |----------------| per device batch size = 9 tuples

        if len(state_inputs) > 0:
            smallest_array = state_inputs[0]
            seq_len = len(inputs[0]) // len(state_inputs[0])
            self._loaded_max_seq_len = seq_len
        else:
            smallest_array = inputs[0]
            self._loaded_max_seq_len = 1

        sequences_per_minibatch = (
            self.max_per_device_batch_size // self._loaded_max_seq_len * len(
                self.devices))
        if sequences_per_minibatch < 1:
            logger.warning(
                ("Target minibatch size is {}, however the rollout sequence "
                 "length is {}, hence the minibatch size will be raised to "
                 "{}.").format(self.max_per_device_batch_size,
                               self._loaded_max_seq_len,
                               self._loaded_max_seq_len * len(self.devices)))
            sequences_per_minibatch = 1

        if len(smallest_array) < sequences_per_minibatch:
            # Dynamically shrink the batch size if insufficient data.
            sequences_per_minibatch = make_divisible_by(
                len(smallest_array), len(self.devices))

        if log_once("data_slicing"):
            logger.info(
                ("Divided {} rollout sequences, each of length {}, among "
                 "{} devices.").format(
                     len(smallest_array), self._loaded_max_seq_len,
                     len(self.devices)))

        if sequences_per_minibatch < len(self.devices):
            raise ValueError(
                "Must load at least 1 tuple sequence per device. Try "
                "increasing `sgd_minibatch_size` or reducing `max_seq_len` "
                "to ensure that at least one sequence fits per device.")
        self._loaded_per_device_batch_size = (sequences_per_minibatch // len(
            self.devices) * self._loaded_max_seq_len)

        if len(state_inputs) > 0:
            # First truncate the RNN state arrays to sequences_per_minibatch.
            state_inputs = [
                make_divisible_by(arr, sequences_per_minibatch)
                for arr in state_inputs
            ]
            # Then truncate the data inputs to match.
            inputs = [arr[:len(state_inputs[0]) * seq_len] for arr in inputs]
            assert len(state_inputs[0]) * seq_len == len(inputs[0]), \
                (len(state_inputs[0]), sequences_per_minibatch, seq_len,
                 len(inputs[0]))
            for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
                feed_dict[ph] = arr
            truncated_len = len(inputs[0])
        else:
            truncated_len = 0
            for ph, arr in zip(self.loss_inputs, inputs):
                truncated_arr = make_divisible_by(arr, sequences_per_minibatch)
                feed_dict[ph] = truncated_arr
                if truncated_len == 0:
                    truncated_len = len(truncated_arr)

        sess.run([t.init_op for t in self._towers], feed_dict=feed_dict)

        self.num_tuples_loaded = truncated_len
        samples_per_device = truncated_len // len(self.devices)
        assert samples_per_device > 0, "No data loaded?"
        assert samples_per_device % self._loaded_per_device_batch_size == 0

        # Return loaded samples per-device.
        return samples_per_device

    def optimize(self, sess, batch_index):
        """Run a single step of SGD.

        Runs an SGD step over a slice of the preloaded batch with size given
        by self._loaded_per_device_batch_size and offset given by the
        batch_index argument.

        Updates shared model weights based on the averaged per-device
        gradients.

        Args:
            sess: TensorFlow session.
            batch_index: Offset into the preloaded data. This value must be
                between `0` and `tuples_per_device`. The amount of data to
                process is at most `max_per_device_batch_size`.

        Returns:
            The outputs of extra_ops evaluated over the batch.
        """
        feed_dict = {
            self._batch_index: batch_index,
            self._per_device_batch_size: self._loaded_per_device_batch_size,
            self._max_seq_len: self._loaded_max_seq_len,
        }
        for tower in self._towers:
            feed_dict.update(tower.loss_graph.extra_compute_grad_feed_dict())

        fetches = {"train": self._train_op}
        for tower_num, tower in enumerate(self._towers):
            tower_fetch = tower.loss_graph._get_grad_and_stats_fetches()
            fetches["tower_{}".format(tower_num)] = tower_fetch

        return sess.run(fetches, feed_dict=feed_dict)

    def get_device_losses(self):
        return [t.loss_graph for t in self._towers]

    def _setup_device(self, tower_i, device, device_input_placeholders,
                      num_data_in):
        assert num_data_in <= len(device_input_placeholders)
        with tf.device(device):
            with tf1.name_scope(TOWER_SCOPE_NAME + f"_{tower_i}"):
                device_input_batches = []
                device_input_slices = []
                for i, ph in enumerate(device_input_placeholders):
                    current_batch = tf1.Variable(
                        ph,
                        trainable=False,
                        validate_shape=False,
                        collections=[])
                    device_input_batches.append(current_batch)
                    if i < num_data_in:
                        scale = self._max_seq_len
                        granularity = self._max_seq_len
                    else:
                        scale = self._max_seq_len
                        granularity = 1
                    current_slice = tf.slice(
                        current_batch,
                        ([self._batch_index // scale * granularity] +
                         [0] * len(ph.shape[1:])),
                        ([self._per_device_batch_size // scale * granularity]
                         + [-1] * len(ph.shape[1:])))
                    current_slice.set_shape(ph.shape)
                    device_input_slices.append(current_slice)
                graph_obj = self.policy_copy(device_input_slices)
                device_grads = graph_obj.gradients(self.optimizers,
                                                   graph_obj._losses)
            return Tower(
                tf.group(
                    *[batch.initializer for batch in device_input_batches]),
                device_grads, graph_obj)


# Each tower is a copy of the loss graph pinned to a specific device.
Tower = namedtuple("Tower", ["init_op", "grads", "loss_graph"])


def make_divisible_by(a, n):
    if type(a) is int:
        return a - a % n
    return a[0:a.shape[0] - a.shape[0] % n]
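
# Illustrative examples (assumption, not from the original source):
#   make_divisible_by(10, 4)            -> 8
#   make_divisible_by(np.arange(10), 4) -> the first 8 elements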


def average_gradients(tower_grads):
    """Averages gradients across towers.

    Calculate the average gradient for each shared variable across all
    towers. Note that this function provides a synchronization point across
    all towers.

    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer
            list is over the towers. The inner list is over the individual
            gradients computed on each tower.

    Returns:
        List of pairs of (gradient, variable) where the gradient has been
            averaged across all towers.

    TODO(ekl): We could use NCCL if this becomes a bottleneck.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):

        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            if g is not None:
                # Add 0 dimension to the gradients to represent the tower.
                expanded_g = tf.expand_dims(g, 0)

                # Append on a 'tower' dimension which we will average over
                # below.
                grads.append(expanded_g)

        if not grads:
            continue

        # Average over the 'tower' dimension.
        grad = tf.concat(axis=0, values=grads)
        grad = tf.reduce_mean(grad, 0)

        # Keep in mind that the Variables are redundant because they are
        # shared across towers. So .. we will just return the first tower's
        # pointer to the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)

    return average_grads
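
# Illustrative shape example (assumption, not from the original source): with
# two towers and two variables,
#   tower_grads = [[(g0_t0, v0), (g1_t0, v1)],
#                  [(g0_t1, v0), (g1_t1, v1)]]
# average_gradients(tower_grads) returns
#   [((g0_t0 + g0_t1) / 2, v0), ((g1_t0 + g1_t1) / 2, v1)].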