cuda_accelerator.py

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
import pkgutil
import importlib

from .abstract_accelerator import DeepSpeedAccelerator

# During the setup stage torch may not be installed yet; passing on a missing
# torch still allows the op builder related APIs to be executed.
try:
    import torch.cuda
except ImportError:
    pass


class CUDA_Accelerator(DeepSpeedAccelerator):

    def __init__(self):
        self._name = 'cuda'
        self._communication_backend_name = 'nccl'

    def is_synchronized_device(self):
        return False

    # Device APIs
    def device_name(self, device_index=None):
        if device_index is None:
            return 'cuda'
        return 'cuda:{}'.format(device_index)

    def device(self, device_index=None):
        return torch.cuda.device(device_index)

    def set_device(self, device_index):
        torch.cuda.set_device(device_index)

    def current_device(self):
        return torch.cuda.current_device()

    def current_device_name(self):
        return 'cuda:{}'.format(torch.cuda.current_device())

    def device_count(self):
        return torch.cuda.device_count()

    def synchronize(self, device_index=None):
        return torch.cuda.synchronize(device_index)

    # RNG APIs
    def random(self):
        return torch.random

    def set_rng_state(self, new_state, device_index=None):
        if device_index is None:
            return torch.cuda.set_rng_state(new_state)
        return torch.cuda.set_rng_state(new_state, device_index)

    def get_rng_state(self, device_index=None):
        if device_index is None:
            return torch.cuda.get_rng_state()
        return torch.cuda.get_rng_state(device_index)

    def manual_seed(self, seed):
        return torch.cuda.manual_seed(seed)

    def manual_seed_all(self, seed):
        return torch.cuda.manual_seed_all(seed)

    def initial_seed(self, seed):
        return torch.cuda.initial_seed(seed)

    def default_generator(self, device_index):
        return torch.cuda.default_generators[device_index]

    # Streams/Events
    @property
    def Stream(self):
        return torch.cuda.Stream

    def stream(self, stream):
        return torch.cuda.stream(stream)

    def current_stream(self, device_index=None):
        return torch.cuda.current_stream(device_index)

    def default_stream(self, device_index=None):
        return torch.cuda.default_stream(device_index)

    @property
    def Event(self):
        return torch.cuda.Event

    # Memory management
    def empty_cache(self):
        return torch.cuda.empty_cache()

    def memory_allocated(self, device_index=None):
        return torch.cuda.memory_allocated(device_index)

    def max_memory_allocated(self, device_index=None):
        return torch.cuda.max_memory_allocated(device_index)

    def reset_max_memory_allocated(self, device_index=None):
        return torch.cuda.reset_max_memory_allocated(device_index)

    def memory_cached(self, device_index=None):
        return torch.cuda.memory_cached(device_index)

    def max_memory_cached(self, device_index=None):
        return torch.cuda.max_memory_cached(device_index)

    def reset_max_memory_cached(self, device_index=None):
        return torch.cuda.reset_max_memory_cached(device_index)

    def memory_stats(self, device_index=None):
        if hasattr(torch.cuda, 'memory_stats'):
            return torch.cuda.memory_stats(device_index)

    def reset_peak_memory_stats(self, device_index=None):
        if hasattr(torch.cuda, 'reset_peak_memory_stats'):
            return torch.cuda.reset_peak_memory_stats(device_index)

    def memory_reserved(self, device_index=None):
        if hasattr(torch.cuda, 'memory_reserved'):
            return torch.cuda.memory_reserved(device_index)

    def max_memory_reserved(self, device_index=None):
        if hasattr(torch.cuda, 'max_memory_reserved'):
            return torch.cuda.max_memory_reserved(device_index)

    def total_memory(self, device_index=None):
        return torch.cuda.get_device_properties(device_index).total_memory

    # Data types
    def is_bf16_supported(self):
        return torch.cuda.is_bf16_supported()

    def is_fp16_supported(self):
        major, _ = torch.cuda.get_device_capability()
        if major >= 7:
            return True
        else:
            return False

    def supported_dtypes(self):
        return [torch.float, torch.half, torch.bfloat16]

    # Misc
    def amp(self):
        if hasattr(torch.cuda, 'amp'):
            return torch.cuda.amp
        return None

    def is_available(self):
        return torch.cuda.is_available()

    def range_push(self, msg):
        if hasattr(torch.cuda.nvtx, 'range_push'):
            return torch.cuda.nvtx.range_push(msg)

    def range_pop(self):
        if hasattr(torch.cuda.nvtx, 'range_pop'):
            return torch.cuda.nvtx.range_pop()

    def lazy_call(self, callback):
        return torch.cuda._lazy_call(callback)

    def communication_backend_name(self):
        return self._communication_backend_name

    # Tensor operations
    @property
    def BFloat16Tensor(self):
        return torch.cuda.BFloat16Tensor

    @property
    def ByteTensor(self):
        return torch.cuda.ByteTensor

    @property
    def DoubleTensor(self):
        return torch.cuda.DoubleTensor

    @property
    def FloatTensor(self):
        return torch.cuda.FloatTensor

    @property
    def HalfTensor(self):
        return torch.cuda.HalfTensor

    @property
    def IntTensor(self):
        return torch.cuda.IntTensor

    @property
    def LongTensor(self):
        return torch.cuda.LongTensor

    def pin_memory(self, tensor):
        return tensor.pin_memory()

    def on_accelerator(self, tensor):
        device_str = str(tensor.device)
        if device_str.startswith('cuda:'):
            return True
        else:
            return False

    def op_builder_dir(self):
        try:
            # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
            # if successful this also means we're doing a local install and not the JIT compile path
            from op_builder import __deepspeed__  # noqa: F401
            return "op_builder"
        except ImportError:
            return "deepspeed.ops.op_builder"

    # dict that holds class name <--> class type mapping i.e.
    # 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>
    # this dict will be filled at init stage
    class_dict = None

    def _lazy_init_class_dict(self):
        if self.class_dict is not None:
            return
        else:
            self.class_dict = {}
            # begin initialize for create_op_builder()
            # put all valid class name <--> class type mappings into class_dict
            op_builder_dir = self.op_builder_dir()
            op_builder_module = importlib.import_module(op_builder_dir)
            op_builder_absolute_path = os.path.dirname(op_builder_module.__file__)
            for _, module_name, _ in pkgutil.iter_modules([op_builder_absolute_path]):
                # avoid self references,
                # skip sub-directories which contain ops for other backends (cpu, npu, etc.).
                if module_name != 'all_ops' and module_name != 'builder' and not os.path.isdir(
                        os.path.join(op_builder_absolute_path, module_name)):
                    module = importlib.import_module("{}.{}".format(op_builder_dir, module_name))
                    for member_name in module.__dir__():
                        if (member_name.endswith('Builder') and member_name != "OpBuilder"
                                and member_name != "CUDAOpBuilder"
                                and member_name != "TorchCPUOpBuilder"):  # avoid abstract classes
                            if member_name not in self.class_dict:
                                self.class_dict[member_name] = getattr(module, member_name)
            # end initialize for create_op_builder()

    # create an instance of op builder and return, name specified by class_name
    def create_op_builder(self, class_name):
        self._lazy_init_class_dict()
        if class_name in self.class_dict:
            return self.class_dict[class_name]()
        else:
            return None

    # return an op builder class, name specified by class_name
    def get_op_builder(self, class_name):
        self._lazy_init_class_dict()
        if class_name in self.class_dict:
            return self.class_dict[class_name]
        else:
            return None

    def build_extension(self):
        from torch.utils.cpp_extension import BuildExtension
        return BuildExtension
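
# --------------------------------------------------------------------------
# Usage sketch (an illustrative addition, not part of the upstream module).
# It shows how the accelerator wrapper defined above might be exercised,
# assuming torch with CUDA support and at least one visible GPU, and that
# this file sits inside its package so the relative import at the top
# resolves (e.g. invoked via python -m <package>.cuda_accelerator).
# The builder name 'FusedAdamBuilder' is only an example lookup key; any
# class discovered by _lazy_init_class_dict() would be handled the same way.
if __name__ == '__main__':
    acc = CUDA_Accelerator()
    if acc.is_available():
        # Query basic device information through the accelerator interface
        # rather than calling torch.cuda directly.
        print('device count  :', acc.device_count())
        print('current device:', acc.current_device_name())
        print('total memory  :', acc.total_memory(acc.current_device()))
        # Look up an op builder by class name; create_op_builder() returns
        # None when the name is not present in the lazily built class_dict.
        builder = acc.create_op_builder('FusedAdamBuilder')
        print('FusedAdamBuilder instance:', builder)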