# sgd.py
  1. """Utils for minibatch SGD across multiple RLlib policies."""
  2. import logging
  3. import numpy as np
  4. import random
  5. from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
  6. from ray.rllib.utils.metrics.learner_info import LearnerInfoBuilder
  7. logger = logging.getLogger(__name__)
  8. def standardized(array: np.ndarray):
  9. """Normalize the values in an array.
  10. Args:
  11. array (np.ndarray): Array of values to normalize.
  12. Returns:
  13. array with zero mean and unit standard deviation.
  14. """
  15. return (array - array.mean()) / max(1e-4, array.std())
  16. def minibatches(samples: SampleBatch,
  17. sgd_minibatch_size: int,
  18. shuffle: bool = True):
  19. """Return a generator yielding minibatches from a sample batch.
  20. Args:
  21. samples: SampleBatch to split up.
  22. sgd_minibatch_size: Size of minibatches to return.
  23. shuffle: Whether to shuffle the order of the generated minibatches.
  24. Note that in case of a non-recurrent policy, the incoming batch
  25. is globally shuffled first regardless of this setting, before
  26. the minibatches are generated from it!
  27. Yields:
  28. SampleBatch: Each of size `sgd_minibatch_size`.
  29. """
  30. if not sgd_minibatch_size:
  31. yield samples
  32. return
  33. if isinstance(samples, MultiAgentBatch):
  34. raise NotImplementedError(
  35. "Minibatching not implemented for multi-agent in simple mode")
  36. if "state_in_0" not in samples and "state_out_0" not in samples:
  37. samples.shuffle()
  38. all_slices = samples._get_slice_indices(sgd_minibatch_size)
  39. data_slices, state_slices = all_slices
  40. if len(state_slices) == 0:
  41. if shuffle:
  42. random.shuffle(data_slices)
  43. for i, j in data_slices:
  44. yield samples.slice(i, j)
  45. else:
  46. all_slices = list(zip(data_slices, state_slices))
  47. if shuffle:
  48. # Make sure to shuffle data and states while linked together.
  49. random.shuffle(all_slices)
  50. for (i, j), (si, sj) in all_slices:
  51. yield samples.slice(i, j, si, sj)
  52. def do_minibatch_sgd(samples, policies, local_worker, num_sgd_iter,
  53. sgd_minibatch_size, standardize_fields):
  54. """Execute minibatch SGD.
  55. Args:
  56. samples (SampleBatch): Batch of samples to optimize.
  57. policies (dict): Dictionary of policies to optimize.
  58. local_worker (RolloutWorker): Master rollout worker instance.
  59. num_sgd_iter (int): Number of epochs of optimization to take.
  60. sgd_minibatch_size (int): Size of minibatches to use for optimization.
  61. standardize_fields (list): List of sample field names that should be
  62. normalized prior to optimization.
  63. Returns:
  64. averaged info fetches over the last SGD epoch taken.
  65. """
  66. # Handle everything as if multi-agent.
  67. samples = samples.as_multi_agent()
  68. # Use LearnerInfoBuilder as a unified way to build the final
  69. # results dict from `learn_on_loaded_batch` call(s).
  70. # This makes sure results dicts always have the same structure
  71. # no matter the setup (multi-GPU, multi-agent, minibatch SGD,
  72. # tf vs torch).
  73. learner_info_builder = LearnerInfoBuilder(num_devices=1)
  74. for policy_id, policy in policies.items():
  75. if policy_id not in samples.policy_batches:
  76. continue
  77. batch = samples.policy_batches[policy_id]
  78. for field in standardize_fields:
  79. batch[field] = standardized(batch[field])
  80. # Check to make sure that the sgd_minibatch_size is not smaller
  81. # than max_seq_len otherwise this will cause indexing errors while
  82. # performing sgd when using a RNN or Attention model
  83. if policy.is_recurrent() and \
  84. policy.config["model"]["max_seq_len"] > sgd_minibatch_size:
  85. raise ValueError("`sgd_minibatch_size` ({}) cannot be smaller than"
  86. "`max_seq_len` ({}).".format(
  87. sgd_minibatch_size,
  88. policy.config["model"]["max_seq_len"]))
  89. for i in range(num_sgd_iter):
  90. for minibatch in minibatches(batch, sgd_minibatch_size):
  91. results = (local_worker.learn_on_batch(
  92. MultiAgentBatch({
  93. policy_id: minibatch
  94. }, minibatch.count)))[policy_id]
  95. learner_info_builder.add_learn_on_batch_results(
  96. results, policy_id)
  97. learner_info = learner_info_builder.finalize()
  98. return learner_info