# sparse_attn.py
  1. # Copyright (c) Microsoft Corporation.
  2. # SPDX-License-Identifier: Apache-2.0
  3. # DeepSpeed Team
  4. from .builder import OpBuilder
  5. try:
  6. from packaging import version as pkg_version
  7. except ImportError:
  8. pkg_version = None
  9. class SparseAttnBuilder(OpBuilder):
  10. BUILD_VAR = "DS_BUILD_SPARSE_ATTN"
  11. NAME = "sparse_attn"
  12. def __init__(self):
  13. super().__init__(name=self.NAME)
  14. def absolute_name(self):
  15. return f'deepspeed.ops.sparse_attention.{self.NAME}_op'
  16. def sources(self):
  17. return ['csrc/sparse_attention/utils.cpp']
  18. def cxx_args(self):
  19. return ['-O2', '-fopenmp']
  20. def is_compatible(self, verbose=False):
  21. # Check to see if llvm and cmake are installed since they are dependencies
  22. #required_commands = ['llvm-config|llvm-config-9', 'cmake']
  23. #command_status = list(map(self.command_exists, required_commands))
  24. #deps_compatible = all(command_status)
  25. if self.is_rocm_pytorch():
  26. if verbose:
  27. self.warning(f'{self.NAME} is not compatible with ROCM')
  28. return False
  29. try:
  30. import torch
  31. except ImportError:
  32. if verbose:
  33. self.warning(f"unable to import torch, please install it first")
  34. return False
  35. # torch-cpu will not have a cuda version
  36. if torch.version.cuda is None:
  37. cuda_compatible = False
  38. if verbose:
  39. self.warning(f"{self.NAME} cuda is not available from torch")
  40. else:
  41. major, minor = torch.version.cuda.split('.')[:2]
  42. cuda_compatible = (int(major) == 10 and int(minor) >= 1) or (int(major) >= 11)
  43. if not cuda_compatible:
  44. if verbose:
  45. self.warning(f"{self.NAME} requires CUDA version 10.1+")
  46. TORCH_MAJOR = int(torch.__version__.split('.')[0])
  47. TORCH_MINOR = int(torch.__version__.split('.')[1])
  48. torch_compatible = (TORCH_MAJOR == 1 and TORCH_MINOR >= 5)
  49. if not torch_compatible:
  50. if verbose:
  51. self.warning(
  52. f'{self.NAME} requires a torch version >= 1.5 and < 2.0 but detected {TORCH_MAJOR}.{TORCH_MINOR}')
  53. try:
  54. import triton
  55. except ImportError:
  56. # auto-install of triton is broken on some systems, reverting to manual install for now
  57. # see this issue: https://github.com/microsoft/DeepSpeed/issues/1710
  58. if verbose:
  59. self.warning(f"please install triton==1.0.0 if you want to use sparse attention")
  60. return False
  61. if pkg_version:
  62. installed_triton = pkg_version.parse(triton.__version__)
  63. triton_mismatch = installed_triton != pkg_version.parse("1.0.0")
  64. else:
  65. installed_triton = triton.__version__
  66. triton_mismatch = installed_triton != "1.0.0"
  67. if triton_mismatch:
  68. if verbose:
  69. self.warning(
  70. f"using untested triton version ({installed_triton}), only 1.0.0 is known to be compatible")
  71. return False
  72. return super().is_compatible(verbose) and torch_compatible and cuda_compatible