npu_accelerator.py

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

# Assumption: the torch.npu namespace used below is registered by Huawei's
# torch_npu extension for Ascend devices. The import is guarded so this module
# can still be imported (e.g. for op builder queries) when torch_npu is absent.
try:
    import torch_npu  # noqa: F401
except ImportError:
    pass

from .abstract_accelerator import DeepSpeedAccelerator


class NPU_Accelerator(DeepSpeedAccelerator):
    def __init__(self):
        self._name = 'npu'
        self._communication_backend_name = 'hccl'

    def is_synchronized_device(self):
        return False
    # Device APIs
    def device_name(self, device_index=None):
        if device_index is None:
            return 'npu'
        return 'npu:{}'.format(device_index)

    def device(self, device_index=None):
        return torch.npu.device(device_index)

    def set_device(self, device_index):
        torch.npu.set_device(device_index)

    def current_device(self):
        return torch.npu.current_device()

    def current_device_name(self):
        return 'npu:{}'.format(torch.npu.current_device())

    def device_count(self):
        return torch.npu.device_count()

    def synchronize(self, device_index=None):
        return torch.npu.synchronize(device_index)
    # RNG APIs
    def random(self):
        return torch.random

    def set_rng_state(self, new_state, device_index=None):
        if device_index is None:
            return torch.npu.set_rng_state(new_state)
        return torch.npu.set_rng_state(new_state, device_index)

    def get_rng_state(self, device_index=None):
        if device_index is None:
            return torch.npu.get_rng_state()
        return torch.npu.get_rng_state(device_index)

    def manual_seed(self, seed):
        return torch.npu.manual_seed(seed)

    def manual_seed_all(self, seed):
        return torch.npu.manual_seed_all(seed)

    def initial_seed(self, seed):
        return torch.npu.initial_seed(seed)

    def default_generator(self, device_index):
        return torch.npu.default_generators[device_index]
    # Streams/Events
    @property
    def Stream(self):
        return torch.npu.Stream

    def stream(self, stream):
        return torch.npu.stream(stream)

    def current_stream(self, device_index=None):
        return torch.npu.current_stream(device_index)

    def default_stream(self, device_index=None):
        return torch.npu.default_stream(device_index)

    @property
    def Event(self):
        return torch.npu.Event
    # Memory management
    def empty_cache(self):
        return torch.npu.empty_cache()

    def memory_allocated(self, device_index=None):
        return torch.npu.memory_allocated(device_index)

    def max_memory_allocated(self, device_index=None):
        return torch.npu.max_memory_allocated(device_index)

    def reset_max_memory_allocated(self, device_index=None):
        return torch.npu.reset_max_memory_allocated(device_index)

    def memory_cached(self, device_index=None):
        return torch.npu.memory_cached(device_index)

    def max_memory_cached(self, device_index=None):
        return torch.npu.max_memory_cached(device_index)

    def reset_max_memory_cached(self, device_index=None):
        return torch.npu.reset_max_memory_cached(device_index)

    def memory_stats(self, device_index=None):
        if hasattr(torch.npu, 'memory_stats'):
            return torch.npu.memory_stats(device_index)

    def reset_peak_memory_stats(self, device_index=None):
        if hasattr(torch.npu, 'reset_peak_memory_stats'):
            return torch.npu.reset_peak_memory_stats(device_index)

    def memory_reserved(self, device_index=None):
        if hasattr(torch.npu, 'memory_reserved'):
            return torch.npu.memory_reserved(device_index)

    def max_memory_reserved(self, device_index=None):
        if hasattr(torch.npu, 'max_memory_reserved'):
            return torch.npu.max_memory_reserved(device_index)

    def total_memory(self, device_index=None):
        return torch.npu.get_device_properties(device_index).total_memory
    # Data types
    def is_bf16_supported(self):
        return torch.npu.is_bf16_supported()

    def is_fp16_supported(self):
        return True

    # Misc
    def amp(self):
        if hasattr(torch.npu, 'amp'):
            return torch.npu.amp
        return None

    def is_available(self):
        return torch.npu.is_available()

    def range_push(self, msg):
        return

    def range_pop(self):
        return

    def lazy_call(self, callback):
        return torch.npu._lazy_call(callback)

    def communication_backend_name(self):
        return self._communication_backend_name
    # Tensor operations
    @property
    def BFloat16Tensor(self):
        return torch.npu.BFloat16Tensor

    @property
    def ByteTensor(self):
        return torch.npu.ByteTensor

    @property
    def DoubleTensor(self):
        return torch.npu.DoubleTensor

    @property
    def FloatTensor(self):
        return torch.npu.FloatTensor

    @property
    def HalfTensor(self):
        return torch.npu.HalfTensor

    @property
    def IntTensor(self):
        return torch.npu.IntTensor

    @property
    def LongTensor(self):
        return torch.npu.LongTensor

    def pin_memory(self, tensor):
        return tensor.pin_memory()

    def on_accelerator(self, tensor):
        device_str = str(tensor.device)
        return device_str.startswith('npu:')
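
    # Hedged usage note (not upstream code): on_accelerator() only inspects the
    # string form of tensor.device, so a tensor created with
    # torch.empty(1, device='npu:0') reports True while a CPU tensor reports False.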
    def op_builder_dir(self):
        try:
            # Is op_builder from deepspeed or a third-party package? This import
            # should only succeed for deepspeed's own op_builder, which also means
            # we are on the local-install path rather than the JIT compile path.
            from op_builder import __deepspeed__  # noqa: F401
            return "op_builder.npu"
        except ImportError:
            return "deepspeed.ops.op_builder.npu"

    # Create an instance of the op builder named by class_name and return it.
    def create_op_builder(self, class_name):
        builder_class = self.get_op_builder(class_name)
        if builder_class is not None:
            return builder_class()
        return None

    # Return an op builder class, with the name specified by class_name.
    def get_op_builder(self, class_name):
        try:
            # Same check as in op_builder_dir: this import only succeeds for
            # deepspeed's own op_builder, i.e. a local install rather than JIT.
            from op_builder import __deepspeed__  # noqa: F401
            from op_builder.npu import NotImplementedBuilder
        except ImportError:
            from deepspeed.ops.op_builder.npu import NotImplementedBuilder

        # Return NotImplementedBuilder so callers always get a class back instead
        # of None, which would otherwise surface as NoneType errors in unit tests.
        return NotImplementedBuilder

    def build_extension(self):
        from torch.utils.cpp_extension import BuildExtension
        return BuildExtension
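

# A minimal usage sketch (not part of the module above), assuming torch_npu is
# installed and at least one Ascend NPU is visible. Because of the relative import
# at the top of the file, run it as a module from within its package
# (e.g. `python -m <package>.npu_accelerator`) rather than as a standalone script.
if __name__ == '__main__':
    acc = NPU_Accelerator()
    if acc.is_available():
        print(acc.device_name(0))                # 'npu:0'
        print(acc.communication_backend_name())  # 'hccl'
        print(acc.device_count())                # number of visible NPUs
        print(acc.total_memory(0))               # total device memory in bytes
    else:
        # Without a device, the op builder path can still be inspected.
        print(acc.op_builder_dir())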