cuda_accelerator.py

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
import pkgutil
import importlib

from .abstract_accelerator import DeepSpeedAccelerator

# During the setup stage torch may not be installed yet; swallowing the
# ImportError here still lets the op-builder-related APIs run without torch.
try:
    import torch.cuda
except ImportError:
    pass


class CUDA_Accelerator(DeepSpeedAccelerator):

    def __init__(self):
        self._name = 'cuda'
        self._communication_backend_name = 'nccl'

    def is_synchronized_device(self):
        return False

    # Device APIs
    def device_name(self, device_index=None):
        if device_index is None:
            return 'cuda'
        return 'cuda:{}'.format(device_index)
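    # Example (illustrative, not in the upstream file):
    #   device_name()  -> 'cuda'
    #   device_name(1) -> 'cuda:1'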

    def device(self, device_index=None):
        return torch.cuda.device(device_index)

    def set_device(self, device_index):
        torch.cuda.set_device(device_index)

    def current_device(self):
        return torch.cuda.current_device()

    def current_device_name(self):
        return 'cuda:{}'.format(torch.cuda.current_device())

    def device_count(self):
        return torch.cuda.device_count()

    def synchronize(self, device_index=None):
        return torch.cuda.synchronize(device_index)

    # RNG APIs
    def random(self):
        return torch.random

    def set_rng_state(self, new_state, device_index=None):
        if device_index is None:
            return torch.cuda.set_rng_state(new_state)
        return torch.cuda.set_rng_state(new_state, device_index)

    def get_rng_state(self, device_index=None):
        if device_index is None:
            return torch.cuda.get_rng_state()
        return torch.cuda.get_rng_state(device_index)

    def manual_seed(self, seed):
        return torch.cuda.manual_seed(seed)

    def manual_seed_all(self, seed):
        return torch.cuda.manual_seed_all(seed)

    def initial_seed(self):
        # torch.cuda.initial_seed() takes no arguments; the original
        # `initial_seed(self, seed)` signature would raise a TypeError.
        return torch.cuda.initial_seed()

    def default_generator(self, device_index):
        return torch.cuda.default_generators[device_index]
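    # Note: torch.cuda.default_generators is a per-device sequence of the
    # default CUDA random number generators, so indexing it with a device
    # ordinal returns that device's generator.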

    # Streams/Events
    @property
    def Stream(self):
        return torch.cuda.Stream

    def stream(self, stream):
        return torch.cuda.stream(stream)

    def current_stream(self, device_index=None):
        return torch.cuda.current_stream(device_index)

    def default_stream(self, device_index=None):
        return torch.cuda.default_stream(device_index)

    @property
    def Event(self):
        return torch.cuda.Event
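    # Minimal usage sketch (illustrative only, assuming a CUDA-enabled torch
    # build): run work on a side stream and record an event on it.
    #
    #   acc = CUDA_Accelerator()
    #   s = acc.Stream()
    #   e = acc.Event(enable_timing=False)
    #   with acc.stream(s):
    #       e.record()
    #   e.synchronize()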

    # Memory management
    def empty_cache(self):
        return torch.cuda.empty_cache()

    def memory_allocated(self, device_index=None):
        return torch.cuda.memory_allocated(device_index)

    def max_memory_allocated(self, device_index=None):
        return torch.cuda.max_memory_allocated(device_index)

    def reset_max_memory_allocated(self, device_index=None):
        return torch.cuda.reset_max_memory_allocated(device_index)

    def memory_cached(self, device_index=None):
        return torch.cuda.memory_cached(device_index)

    def max_memory_cached(self, device_index=None):
        return torch.cuda.max_memory_cached(device_index)

    def reset_max_memory_cached(self, device_index=None):
        return torch.cuda.reset_max_memory_cached(device_index)

    def memory_stats(self, device_index=None):
        if hasattr(torch.cuda, 'memory_stats'):
            return torch.cuda.memory_stats(device_index)

    def reset_peak_memory_stats(self, device_index=None):
        if hasattr(torch.cuda, 'reset_peak_memory_stats'):
            return torch.cuda.reset_peak_memory_stats(device_index)

    def memory_reserved(self, device_index=None):
        if hasattr(torch.cuda, 'memory_reserved'):
            return torch.cuda.memory_reserved(device_index)

    def max_memory_reserved(self, device_index=None):
        if hasattr(torch.cuda, 'max_memory_reserved'):
            return torch.cuda.max_memory_reserved(device_index)

    def total_memory(self, device_index=None):
        return torch.cuda.get_device_properties(device_index).total_memory
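    # Illustrative reading of the memory APIs (values depend on the run):
    #   acc.memory_allocated(0)  # bytes currently allocated by live tensors
    #   acc.memory_reserved(0)   # bytes held by the caching allocator
    #   acc.total_memory(0)      # total memory reported for device 0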

    # Data types
    def is_bf16_supported(self):
        return torch.cuda.is_bf16_supported()

    def is_fp16_supported(self):
        major, _ = torch.cuda.get_device_capability()
        return major >= 7
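    # The fp16 check above gates on compute capability 7.0 (Volta) and newer,
    # the generation that introduced Tensor Cores for fast fp16 math.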

    # Misc
    def amp(self):
        if hasattr(torch.cuda, 'amp'):
            return torch.cuda.amp
        return None

    def is_available(self):
        return torch.cuda.is_available()

    def range_push(self, msg):
        if hasattr(torch.cuda.nvtx, 'range_push'):
            return torch.cuda.nvtx.range_push(msg)

    def range_pop(self):
        if hasattr(torch.cuda.nvtx, 'range_pop'):
            return torch.cuda.nvtx.range_pop()
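    # NVTX ranges must be pushed and popped in pairs; they appear as named
    # spans in profilers such as Nsight Systems.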

    def lazy_call(self, callback):
        return torch.cuda._lazy_call(callback)

    def communication_backend_name(self):
        return self._communication_backend_name

    # Tensor operations
    @property
    def BFloat16Tensor(self):
        return torch.cuda.BFloat16Tensor

    @property
    def ByteTensor(self):
        return torch.cuda.ByteTensor

    @property
    def DoubleTensor(self):
        return torch.cuda.DoubleTensor

    @property
    def FloatTensor(self):
        return torch.cuda.FloatTensor

    @property
    def HalfTensor(self):
        return torch.cuda.HalfTensor

    @property
    def IntTensor(self):
        return torch.cuda.IntTensor

    @property
    def LongTensor(self):
        return torch.cuda.LongTensor
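    # These properties expose the legacy torch.cuda.*Tensor constructors so
    # callers can build typed CUDA tensors without importing torch.cuda
    # directly.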

    def pin_memory(self, tensor):
        return tensor.pin_memory()

    def on_accelerator(self, tensor):
        return str(tensor.device).startswith('cuda:')
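    # Illustrative behavior (assuming a CUDA device is present):
    #   acc.on_accelerator(torch.ones(1, device='cuda:0'))  # True
    #   acc.on_accelerator(torch.ones(1))                   # False (CPU tensor)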

    def op_builder_dir(self):
        try:
            # Is op_builder from deepspeed or a 3rd-party version? This import
            # only succeeds for deepspeed's own copy, which also means we are
            # on a local-install path rather than the JIT-compile path.
            from op_builder import __deepspeed__  # noqa: F401
            return "op_builder"
        except ImportError:
            return "deepspeed.ops.op_builder"

    # Dict that holds the class-name <--> class-type mapping, e.g.
    # 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>.
    # It is filled lazily on first use.
    class_dict = None

    def _lazy_init_class_dict(self):
        if self.class_dict is not None:
            return
        self.class_dict = {}
        # Begin initialization for create_op_builder(): put every valid
        # class-name <--> class-type mapping into class_dict.
        op_builder_dir = self.op_builder_dir()
        op_builder_module = importlib.import_module(op_builder_dir)
        for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(op_builder_module.__file__)]):
            # Avoid self references.
            if module_name != 'all_ops' and module_name != 'builder' and module_name != 'cpu':
                module = importlib.import_module("{}.{}".format(op_builder_dir, module_name))
                for member_name in module.__dir__():
                    # Skip the abstract builder base classes.
                    if member_name.endswith('Builder') and member_name not in (
                            "OpBuilder", "CUDAOpBuilder", "TorchCPUOpBuilder"):
                        if member_name not in self.class_dict:
                            self.class_dict[member_name] = getattr(module, member_name)
        # End initialization for create_op_builder().

    # Create an instance of the op builder named by class_name and return it.
    def create_op_builder(self, class_name):
        self._lazy_init_class_dict()
        if class_name in self.class_dict:
            return self.class_dict[class_name]()
        return None
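    # Illustrative call (FusedAdamBuilder ships with DeepSpeed's op_builder
    # package; substitute any builder name found in class_dict):
    #   builder = CUDA_Accelerator().create_op_builder('FusedAdamBuilder')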

    # Return the op builder class named by class_name.
    def get_op_builder(self, class_name):
        self._lazy_init_class_dict()
        if class_name in self.class_dict:
            return self.class_dict[class_name]
        return None

    def build_extension(self):
        from torch.utils.cpp_extension import BuildExtension
        return BuildExtension
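

# Minimal smoke test (illustrative sketch, not part of the upstream module);
# assumes a CUDA-enabled torch build and at least one visible GPU.
if __name__ == '__main__':
    acc = CUDA_Accelerator()
    if acc.is_available():
        print(acc.device_name(acc.current_device()))  # e.g. 'cuda:0'
        print(acc.device_count(), 'device(s),',
              acc.total_memory(0) // (1024 ** 2), 'MiB on device 0')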