# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
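
"""DeepSpeed accelerator implementation for Cambricon MLU devices.

Mirrors the CUDA accelerator by delegating to the torch.mlu namespace
(provided by Cambricon's torch_mlu extension) and uses CNCL as the
collective communication backend.
"""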

import importlib
import inspect
import functools

from .abstract_accelerator import DeepSpeedAccelerator

# During the setup stage torch may not be installed yet; passing on the
# ImportError still allows the op-builder-related APIs to be executed.
try:
    import torch
except ImportError:
    pass


class MLU_Accelerator(DeepSpeedAccelerator):

    def __init__(self):
        self._name = 'mlu'
        self._communication_backend_name = 'cncl'
        self._compile_backend = "inductor"
        self.class_dict = None
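        # class_dict starts empty and is populated lazily by
        # _lazy_init_class_dict() with the op-builder classes discovered in
        # the MLU op_builder package.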

    def is_synchronized_device(self):
        return False

    def use_host_timers(self):
        return self.is_synchronized_device()

    def resolves_data_dependency(self):
        return self.is_synchronized_device()

    def handles_memory_backpressure(self):
        return self.is_synchronized_device()

    # Device APIs
    def device_name(self, device_index=None):
        if device_index is None:
            return 'mlu'
        return 'mlu:{}'.format(device_index)

    def device(self, device_index=None):
        return torch.mlu.device(device_index)

    def set_device(self, device_index):
        torch.mlu.set_device(device_index)

    def current_device(self):
        return torch.mlu.current_device()

    def current_device_name(self):
        return 'mlu:{}'.format(torch.mlu.current_device())

    def device_count(self):
        return torch.mlu.device_count()

    def synchronize(self, device_index=None):
        return torch.mlu.synchronize(device_index)

    # RNG APIs
    def random(self):
        return torch.random

    def set_rng_state(self, new_state, device_index=None):
        if device_index is None:
            return torch.mlu.set_rng_state(new_state)
        return torch.mlu.set_rng_state(new_state, device_index)

    def get_rng_state(self, device_index=None):
        if device_index is None:
            return torch.mlu.get_rng_state()
        return torch.mlu.get_rng_state(device_index)

    def manual_seed(self, seed):
        return torch.mlu.manual_seed(seed)

    def manual_seed_all(self, seed):
        return torch.mlu.manual_seed_all(seed)

    def initial_seed(self):
        # Like torch.cuda.initial_seed(), this takes no arguments and returns
        # the current device's seed.
        return torch.mlu.initial_seed()

    def default_generator(self, device_index):
        return torch.mlu.default_generators[device_index]

    # Streams/Events
    @property
    def Stream(self):
        return torch.mlu.Stream

    def stream(self, stream):
        return torch.mlu.stream(stream)

    def current_stream(self, device_index=None):
        return torch.mlu.current_stream(device_index)

    def default_stream(self, device_index=None):
        return torch.mlu.default_stream(device_index)

    @property
    def Event(self):
        return torch.mlu.Event

    # Memory management
    def empty_cache(self):
        return torch.mlu.empty_cache()

    def memory_allocated(self, device_index=None):
        return torch.mlu.memory_allocated(device_index)

    def max_memory_allocated(self, device_index=None):
        return torch.mlu.max_memory_allocated(device_index)

    def reset_max_memory_allocated(self, device_index=None):
        return torch.mlu.reset_max_memory_allocated(device_index)

    def memory_cached(self, device_index=None):
        return torch.mlu.memory_cached(device_index)

    def max_memory_cached(self, device_index=None):
        return torch.mlu.max_memory_cached(device_index)

    def reset_max_memory_cached(self, device_index=None):
        return torch.mlu.reset_max_memory_cached(device_index)
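
    # The stats APIs below exist only in newer torch_mlu releases, so each is
    # guarded with hasattr and implicitly returns None when the underlying
    # function is unavailable.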
    def memory_stats(self, device_index=None):
        if hasattr(torch.mlu, 'memory_stats'):
            return torch.mlu.memory_stats(device_index)

    def reset_peak_memory_stats(self, device_index=None):
        if hasattr(torch.mlu, 'reset_peak_memory_stats'):
            return torch.mlu.reset_peak_memory_stats(device_index)

    def memory_reserved(self, device_index=None):
        if hasattr(torch.mlu, 'memory_reserved'):
            return torch.mlu.memory_reserved(device_index)

    def max_memory_reserved(self, device_index=None):
        if hasattr(torch.mlu, 'max_memory_reserved'):
            return torch.mlu.max_memory_reserved(device_index)

    def total_memory(self, device_index=None):
        return torch.mlu.get_device_properties(device_index).total_memory

    def available_memory(self, device_index=None):
        return self.total_memory(device_index) - self.memory_allocated(device_index)

    # Data types
    def is_bf16_supported(self):
        return torch.mlu.is_bf16_supported()

    def is_fp16_supported(self):
        return True

    def supported_dtypes(self):
        supported_dtypes = [torch.float]
        if self.is_fp16_supported():
            supported_dtypes.append(torch.half)
        if self.is_bf16_supported():
            supported_dtypes.append(torch.bfloat16)
        return supported_dtypes
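    # For example, on a bf16-capable device supported_dtypes() returns
    # [torch.float32, torch.float16, torch.bfloat16].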

    # Misc
    def amp(self):
        if hasattr(torch.mlu, 'amp'):
            return torch.mlu.amp
        return None

    def is_available(self):
        return torch.mlu.is_available()

    def range_push(self, msg):
        if hasattr(torch.mlu.cnpx, 'range_push'):
            return torch.mlu.cnpx.range_push(msg)

    def range_pop(self):
        if hasattr(torch.mlu.cnpx, 'range_pop'):
            return torch.mlu.cnpx.range_pop()

    def lazy_call(self, callback):
        return torch.mlu._lazy_call(callback)

    def communication_backend_name(self):
        return self._communication_backend_name

    def is_triton_supported(self):
        return True

    # Graph operations
    def create_graph(self):
        # Return the new graph object so callers can capture into and replay it.
        return torch.mlu.MLUGraph()

    def capture_to_graph(self, graph, pool=None, stream=None):
        return torch.mlu.graph(graph, pool, stream)

    def replay_graph(self, graph):
        graph.replay()
        return
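
    # The helpers above mirror torch.cuda's graph-capture API:
    #   g = accel.create_graph()
    #   with accel.capture_to_graph(g):
    #       ...  # ops recorded into the graph
    #   accel.replay_graph(g)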

    # Tensor operations
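    # Each property below mimics a legacy torch.cuda.FloatTensor-style
    # constructor by partially applying torch.tensor with a fixed dtype and
    # the 'mlu' device.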
    @property
    def BFloat16Tensor(self):
        return functools.partial(torch.tensor, dtype=torch.bfloat16, device='mlu')

    @property
    def ByteTensor(self):
        return functools.partial(torch.tensor, dtype=torch.uint8, device='mlu')

    @property
    def DoubleTensor(self):
        return functools.partial(torch.tensor, dtype=torch.double, device='mlu')

    @property
    def FloatTensor(self):
        return functools.partial(torch.tensor, dtype=torch.float, device='mlu')

    @property
    def HalfTensor(self):
        return functools.partial(torch.tensor, dtype=torch.half, device='mlu')

    @property
    def IntTensor(self):
        return functools.partial(torch.tensor, dtype=torch.int, device='mlu')

    @property
    def LongTensor(self):
        return functools.partial(torch.tensor, dtype=torch.long, device='mlu')

    def pin_memory(self, tensor):
        return tensor.pin_memory()

    def is_pinned(self, tensor):
        return tensor.is_pinned()

    def on_accelerator(self, tensor):
        return str(tensor.device).startswith('mlu:')

    def op_builder_dir(self):
        try:
            # Is op_builder from DeepSpeed or a third-party version? This import
            # only succeeds for DeepSpeed's own op_builder, which also means we
            # are on the local-install path rather than the JIT compile path.
            from op_builder import __deepspeed__  # noqa: F401 # type: ignore
            return "op_builder.mlu"
        except ImportError:
            return "deepspeed.ops.op_builder.mlu"

    def _lazy_init_class_dict(self):
        if self.class_dict:
            return
        op_builder_module = importlib.import_module(self.op_builder_dir())
        # get op builder classes from op_builder/mlu/__init__.py
        self.class_dict = {}
        for class_name, class_obj in inspect.getmembers(op_builder_module, inspect.isclass):
            self.class_dict[class_name] = class_obj

    # Create and return an instance of the op builder named class_name.
    def create_op_builder(self, class_name):
        builder_class = self.get_op_builder(class_name)
        return builder_class()

    # Return the op builder class named class_name, falling back to
    # NotImplementedBuilder for ops that are not implemented on MLU.
    def get_op_builder(self, class_name):
        self._lazy_init_class_dict()
        if class_name in self.class_dict:
            return self.class_dict[class_name]
        else:
            return self.class_dict['NotImplementedBuilder']

    def build_extension(self):
        from torch.utils.cpp_extension import BuildExtension
        return BuildExtension

    def export_envs(self):
        return ['NEUWARE_HOME', 'CNCL', 'LD_LIBRARY', 'PATH']

    def visible_devices_envs(self):
        return ['MLU_VISIBLE_DEVICES']

    def set_visible_devices_envs(self, current_env, local_accelerator_ids):
        for env in self.visible_devices_envs():
            current_env[env] = ",".join(map(str, local_accelerator_ids))
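    # e.g. set_visible_devices_envs(os.environ, [0, 1]) sets
    # MLU_VISIBLE_DEVICES="0,1".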

    def get_compile_backend(self):
        return self._compile_backend

    def set_compile_backend(self, backend):
        supported_backends = torch._dynamo.list_backends(exclude_tags=())
        if backend in supported_backends:
            self._compile_backend = backend
        else:
            raise ValueError(
                f"{backend} not supported by {self.device_name()}. Supported backends are {supported_backends}")