# debug.py

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# For lazy import with printflock()
fcntl = None

# for debug purposes map module and param objects to their fully qualified names
module_names = {}
param_names = {}


def debug_clear_module_and_param_names():
    global module_names
    global param_names
    module_names = {}
    param_names = {}


def debug_extract_module_and_param_names(model):
    # extract the fully qualified names as soon as the model is acquired
    global module_names
    global param_names
    # XXX: can probably make a map of param2module and vice-versa
    module_names = {module: name for name, module in model.named_modules()}
    param_names = {param: name for name, param in model.named_parameters()}


def debug_module2name(module):
    if module in module_names:
        return module_names[module]
    else:
        return "unknown"


def debug_module2name_id(module):
    return f"name={debug_module2name(module)} id={module.id}"


def debug_module2name_class(module):
    return f"name={debug_module2name(module)} {module.__class__.__name__}"


def debug_param2name(param):
    if param in param_names:
        return param_names[param]
    else:
        return "unknown"


def debug_param2name_id(param):
    # ds_id / ds_status are attributes that ZeRO-3 attaches to partitioned params
    return f"name={debug_param2name(param)} id={param.ds_id}"


def debug_param2name_id_shape(param):
    return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}"


def debug_param2name_id_shape_device(param):
    return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}"


def debug_param2name_id_numel(param):
    return f"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}"


def debug_param2name_id_shape_status(param):
    return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}"


def printflock(*msgs):
    """
    For printing messages for all concurrent gpus w/o getting interleaved text.

    This is useful when debugging issues where multi-gpus don't sync.

    1. Enable the force debug in say partitioning and zero3 files
    2. Override the usual versions with ::

        def print_rank_0(message, debug=False, force=False):
            rank = deepspeed.comm.get_rank()
            printflock(f"[{rank}] {message}")

    3. run the program and you get both logs non-interleaved

    But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper
    function might be more useful, as it's easier to send each log stream into a separate file and
    then compare those.

    """
    global fcntl
    if fcntl is None:
        import fcntl

    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


fh = None
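

# Hedged sketch, not part of the original file: the override that the printflock
# docstring above describes, written out as a standalone helper. It assumes
# deepspeed.comm has been initialized so get_rank() is valid.
def _print_rank_0_via_printflock(message, debug=False, force=False):
    import deepspeed

    rank = deepspeed.comm.get_rank()
    printflock(f"[{rank}] {message}")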


def log_rank_file(rank, *msgs):
    """
    Print to a log file of the given rank

    This is useful for debugging hanging in sync processes. Here is a possible workflow:

    1. Enable the force debug in say partitioning and zero3 files
    2. Override the usual versions of print_rank_0 in those files with ::

        def print_rank_0(message, debug=False, force=False):
            rank = deepspeed.comm.get_rank()
            log_rank_file(rank, message)

    3. run the program
    4. fix up the expected differences, e.g. different cuda numbers ::

        perl -pi -e 's|cuda:1|cuda:0|' log_rank_*

    5. now diff and see where names and ids diverge - you will find where the gpus don't do the same
       work (e.g. when some layers get conditionally skipped on one gpu but not all) ::

        diff -u log_rank_0.txt log_rank_1.txt | less

    """
    global fh
    if fh is None:
        fh = open(f"log_rank_{rank}.txt", "w")

    for m in msgs:
        fh.write(f"{m}\n")

    fh.flush()
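

# Hedged sketch, not part of the original file: the per-rank override that the
# log_rank_file docstring above describes; assumes deepspeed.comm has been initialized.
def _print_rank_0_via_log_rank_file(message, debug=False, force=False):
    import deepspeed

    rank = deepspeed.comm.get_rank()
    log_rank_file(rank, message)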


def print_backward_tensors(tensor):

    def _print_bwd_tensors(grad_fn):
        print(f"Backward tensors in {grad_fn}")
        for funcs in grad_fn.next_functions:
            if funcs[0]:
                try:
                    # leaf nodes (AccumulateGrad) expose the underlying tensor as .variable
                    tensor = getattr(funcs[0], 'variable')
                    print(funcs[0])
                    print(f"Tensor - id: {id(tensor)}, shape: {tensor.shape}, data: {tensor}, grad: {tensor.grad}")
                except AttributeError:
                    # non-leaf nodes: keep walking the graph
                    _print_bwd_tensors(funcs[0])

    if hasattr(tensor, 'grad_fn'):
        _print_bwd_tensors(tensor.grad_fn)
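

# Hedged demo, not part of the original file: walk a tiny autograd graph with
# print_backward_tensors; it prints the backward nodes it visits and the leaf tensor x.
def _demo_print_backward_tensors():
    import torch

    x = torch.randn(3, requires_grad=True)
    y = (x * 2).sum()
    print_backward_tensors(y)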