# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os

from .builder import CUDAOpBuilder, installed_cuda_version


class InferenceCutlassBuilder(CUDAOpBuilder):
    """Op builder for the CUTLASS-based inference v2 kernels (mixed GEMM and MoE GEMM)."""

    BUILD_VAR = "DS_BUILD_CUTLASS_OPS"
    NAME = "cutlass_ops"

    def __init__(self, name=None):
        name = self.NAME if name is None else name
        super().__init__(name=name)

    def absolute_name(self):
        return f'deepspeed.inference.v2.kernels.cutlass_ops.{self.NAME}'

    def is_compatible(self, verbose=True):
        try:
            import torch
        except ImportError:
            self.warning("Please install torch if trying to pre-compile inference kernels")
            return False

        cuda_okay = True
        if not self.is_rocm_pytorch() and torch.cuda.is_available():  #ignore-cuda
            sys_cuda_major, _ = installed_cuda_version()
            torch_cuda_major = int(torch.version.cuda.split('.')[0])
            cuda_capability = torch.cuda.get_device_properties(0).major  #ignore-cuda
            if cuda_capability < 6:
                self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
                cuda_okay = False
            if cuda_capability >= 8:
                # Ampere (sm_80) and newer require a CUDA 11+ toolchain, both for
                # the system toolkit and for the torch build.
                if torch_cuda_major < 11 or sys_cuda_major < 11:
                    self.warning("On Ampere and higher architectures please use CUDA 11+")
                    cuda_okay = False
        return super().is_compatible(verbose) and cuda_okay
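
    # Illustrative outcomes of the gating above (examples, not exhaustive): an
    # A100 (compute capability 8.0) with a CUDA 11.x toolkit and a CUDA 11.x
    # torch build passes, while the same GPU paired with a CUDA 10.x toolchain
    # triggers the "CUDA 11+" warning and is reported incompatible.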

    def filter_ccs(self, ccs):
        ccs_retained = []
        ccs_pruned = []
        for cc in ccs:
            if int(cc[0]) >= 8:
                # Only support Ampere and newer
                ccs_retained.append(cc)
            else:
                ccs_pruned.append(cc)
        if len(ccs_pruned) > 0:
            self.warning(f"Filtered compute capabilities {ccs_pruned}")
        return ccs_retained
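
    # Illustrative example, assuming ccs arrive as "major.minor" strings from
    # DeepSpeed's compute-capability detection (so cc[0] is the major digit):
    #   filter_ccs(["6.0", "7.0", "8.0", "9.0"]) -> ["8.0", "9.0"]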

    def get_prefix(self):
        # Paths differ depending on whether we are building from a source
        # checkout (a "deepspeed" directory exists) or from an installed package.
        ds_path = self.deepspeed_src_path("deepspeed")
        return "deepspeed" if os.path.isdir(ds_path) else ".."

    def sources(self):
        # Host-side bindings plus the two CUTLASS GEMM kernel translation units.
        sources = [
            "inference/v2/kernels/cutlass_ops/cutlass_ops.cpp",
            "inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.cu",
            "inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.cu",
        ]

        prefix = self.get_prefix()
        sources = [os.path.join(prefix, src) for src in sources]
        return sources

    def extra_ldflags(self):
        # Link against the prebuilt deepspeedft library shipped with the
        # dskernels package.
        import dskernels
        lib_path = dskernels.library_path()

        prefix = self.get_prefix()
        lib_path = os.path.join(prefix, lib_path)
        lib_path = self.deepspeed_src_path(lib_path)

        args = [f'-L{lib_path}', '-ldeepspeedft']
        if self.jit_load:
            # When JIT-loading, embed an rpath so the extension can locate the
            # shared library at runtime without LD_LIBRARY_PATH.
            args.append(f'-Wl,-rpath,{lib_path}')
        return args
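
    # Illustrative result when JIT-loading (library path elided):
    #   ['-L/.../dskernels/lib', '-ldeepspeedft', '-Wl,-rpath,/.../dskernels/lib']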

    def include_paths(self):
        includes = [
            'inference/v2/kernels/includes',
            'inference/v2/kernels/cutlass_ops/mixed_gemm',
            'inference/v2/kernels/cutlass_ops/moe_gemm',
            'inference/v2/kernels/cutlass_ops/shared_resources/',
        ]

        prefix = self.get_prefix()
        includes = [os.path.join(prefix, inc) for inc in includes]
        return includes
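

# A minimal usage sketch (illustrative, not part of the original module). It
# assumes DeepSpeed's standard op-builder flow, where is_compatible() gates the
# build and load() JIT-compiles the extension on first use.
if __name__ == "__main__":
    builder = InferenceCutlassBuilder()
    if builder.is_compatible(verbose=True):
        cutlass_ops = builder.load()  # JIT-builds the op if not pre-compiled
        print(f"Loaded {builder.absolute_name()}")
    else:
        print(f"{builder.NAME} is not supported in this environment")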