eigenvalue.py

import torch
from deepspeed.utils import log_dist
import numpy as np
import logging


class Eigenvalue(object):
    def __init__(self,
                 verbose=False,
                 max_iter=100,
                 tol=1e-2,
                 stability=0,
                 gas_boundary_resolution=1,
                 layer_name='',
                 layer_num=0):
        super().__init__()

        self.verbose = verbose
        self.max_iter = max_iter
        self.tol = tol
        self.stability = stability
        self.gas_boundary_resolution = gas_boundary_resolution
        self.layer_name = layer_name
        self.layer_num = layer_num

        assert len(self.layer_name) > 0 and layer_num > 0

        log_dist(
            f'enabled eigenvalue with verbose={verbose}, max_iter={max_iter}, tol={tol}, '
            f'stability={stability}, gas_boundary_resolution={gas_boundary_resolution}, '
            f'layer_name={layer_name}, layer_num={layer_num}',
            ranks=[0])
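
    # Configuration notes (inferred from how the fields are used in this file and,
    # for gas_boundary_resolution, by the caller):
    #   max_iter   - cap on power iterations per block
    #   tol        - relative change in the eigenvalue estimate at which iteration stops
    #   stability  - epsilon added to the norm in normalize() to guard against division by zero
    #   layer_name - dot-separated attribute path from the model root to the block container
    #   layer_num  - number of blocks; the container must be indexable (e.g. an nn.ModuleList)
    #   gas_boundary_resolution - gradient accumulation step boundaries at which the caller
    #                             recomputes eigenvalues (not used inside this class)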

    # Replace all nan/pos-inf/neg-inf values with zero.
    # TODO: newer PyTorch releases (1.8+) provide torch.nan_to_num; replace this helper with it then.
    def nan_to_num(self, x):
        device = x.device
        x = x.cpu().numpy()
        x = np.nan_to_num(x=x, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
        return torch.from_numpy(x).to(device)
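
    # normalize() and inner_product() treat a list of per-parameter tensors as one
    # flattened vector: inner_product() is the dot product over the concatenation, and
    # normalize() rescales the list to unit L2 norm, with self.stability added to the
    # norm as a guard against division by zero.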
    def normalize(self, v):
        norm_squared = self.inner_product(v, v)
        norm = norm_squared**0.5 + self.stability
        normalized_vectors = [vector / norm for vector in v]
        normalized_vectors = [self.nan_to_num(vector) for vector in normalized_vectors]
        return normalized_vectors

    def inner_product(self, xs, ys):
        return sum([torch.sum(x * y) for (x, y) in zip(xs, ys)])

    def get_layers(self, module):
        scope_names = self.layer_name.split('.')
        assert len(scope_names) > 0

        m = module
        for name in scope_names:
            assert hasattr(m, name), "layer_name configuration is invalid."
            m = getattr(m, name)

        return m
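
    # compute_eigenvalue() estimates the dominant Hessian eigenvalue of each block by
    # power iteration without ever materializing the Hessian: starting from a random
    # vector v normalized to unit length, it forms the Hessian-vector product
    # Hv = d(g . v)/dp via torch.autograd.grad on the stored gradients g, takes the
    # Rayleigh quotient v . Hv as the current eigenvalue estimate, renormalizes, and
    # repeats until the relative change falls below tol or max_iter is reached.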
    def compute_eigenvalue(self, module, device=None, scale=1.0):
        block_eigenvalue = []
        param_keys = []
        layers = self.get_layers(module)

        for block in range(self.layer_num):
            model_block = layers[block]

            # We found this randn() has obvious accuracy impact in some cases, save/recover random state here.
            rng_state = torch.random.get_rng_state()
            if device is None:
                v = [
                    torch.randn(p.size()) for p in model_block.parameters()
                    if p.grad is not None and p.grad.grad_fn is not None
                ]
            else:
                v = [
                    torch.randn(p.size(), device=device) for p in model_block.parameters()
                    if p.grad is not None and p.grad.grad_fn is not None
                ]
            torch.random.set_rng_state(rng_state)

            grads = [
                param.grad for param in model_block.parameters()
                if param.grad is not None and param.grad.grad_fn is not None
            ]
            params = [
                param for param in model_block.parameters()
                if param.grad is not None and param.grad.grad_fn is not None
            ]

            layer_keys = [id(p) for p in model_block.parameters()]
            param_keys.append(layer_keys)

            v = self.normalize(v)

            # Disable eigenvalue if the model doesn't support second order gradients computation,
            # e.g. when enabling DS transformer kernel.
            if len(grads) == 0 or len(params) == 0:
                log_dist(f'The model does NOT support eigenvalue computation.',
                         ranks=[0],
                         level=logging.WARNING)
                return []

            i = 0
            eigenvalue_current, eigenvalue_previous = 1., 0.

            # Test the convergence criteria: stop once the relative change drops below tol.
            while (i < self.max_iter) and abs(eigenvalue_current) > 0 and (abs(
                    (eigenvalue_current - eigenvalue_previous) / eigenvalue_current) >= self.tol):
                eigenvalue_previous = eigenvalue_current

                Hv = torch.autograd.grad(grads,
                                         params,
                                         grad_outputs=v,
                                         only_inputs=True,
                                         retain_graph=True)
                #Hv = [hv.float() for hv in Hv]
                Hv = [self.nan_to_num(hv).float() for hv in Hv]

                eigenvalue_current = self.inner_product(Hv, v).item()

                v = self.normalize(Hv)
                v = [x / scale for x in v]
                i += 1

            eigenvalue_current *= scale
            block_eigenvalue.append(eigenvalue_current)

            if self.verbose:
                log_dist(f'block: {block}, power iteration: {i}, eigenvalue: {eigenvalue_current}', ranks=[0])

        block_eigenvalue = self.post_process(block_eigenvalue)

        if self.verbose:
            log_dist(f'post processed block_eigenvalue: {block_eigenvalue}', ranks=[0])

        # {param_id: (eigenvalue, layer_id)}
        ev_dict = {}
        for i, (layer_keys, value) in enumerate(zip(param_keys, block_eigenvalue)):
            ev_dict.update(dict.fromkeys(layer_keys, (value, i)))

        return ev_dict

    # 1. Map all eigenvalues to [0, 1.0] by dividing by the largest absolute value,
    #    e.g. [4.0, -2.0, 0.0] -> [1.0, 0.5, 1.0].
    # 2. Some layers can't generate valid eigenvalues in fp16 precision (they come out as 0.0);
    #    use 1.0 for those instead.
    def post_process(self, value_list):
        max_value = abs(max(value_list, key=abs))
        return [abs(v) / max_value if v != 0.0 else 1.0 for v in value_list]
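
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the DeepSpeed file): the toy
# model, data, and hyperparameters below are assumptions chosen only to show the
# calling convention. The model must expose an indexable block container at
# `layer_name`, and the backward pass must use create_graph=True so that
# p.grad.grad_fn is available for the Hessian-vector products.
if __name__ == '__main__':
    import torch.nn as nn

    class ToyModel(nn.Module):
        def __init__(self, num_blocks=2, width=8):
            super().__init__()
            # Reachable as getattr(model, 'layers') and indexable per block.
            self.layers = nn.ModuleList(nn.Linear(width, width) for _ in range(num_blocks))

        def forward(self, x):
            for layer in self.layers:
                x = torch.relu(layer(x))
            return x

    model = ToyModel()
    loss = model(torch.randn(4, 8)).pow(2).mean()
    loss.backward(create_graph=True)  # keep the graph so second-order gradients exist

    eigenvalue = Eigenvalue(verbose=True, layer_name='layers', layer_num=2)
    ev_dict = eigenvalue.compute_eigenvalue(model)
    # ev_dict maps id(param) -> (post-processed eigenvalue in [0, 1], block index)
    print(ev_dict)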