# .github/workflows/cpu-inference.yml
  1. name: cpu-inference
  2. on:
  3. workflow_dispatch:
  4. concurrency:
  5. group: ${{ github.workflow }}-${{ github.ref }}
  6. cancel-in-progress: true
  7. jobs:
  8. unit-tests:
  9. runs-on: ubuntu-20.04
  10. steps:
  11. - uses: actions/checkout@v3
  12. - id: setup-venv
  13. uses: ./.github/workflows/setup-venv
  14. - name: Detect instruction sets on instance
  15. run: |
  16. lscpu
  17. pip install cmake
  18. git clone https://github.com/intel/intel-extension-for-pytorch
  19. cd intel-extension-for-pytorch/tests/cpu/isa
  20. cmake .
  21. make
  22. ./cpu_features
  23. - name: Install numactl
  24. run: |
  25. sudo apt-get install -y numactl
  26. - name: Install oneCCL Bindings for PyTorch
  27. run: |
  28. python -m pip install intel_extension_for_pytorch
  29. python -m pip install oneccl_bind_pt==2.0 -f https://developer.intel.com/ipex-whl-stable-cpu
  30. - name: Install oneCCL
  31. run: |
  32. git clone https://github.com/oneapi-src/oneCCL
  33. cd oneCCL
  34. mkdir build
  35. cd build
  36. cmake ..
  37. make
  38. make install
  39. #source ./_install/env/setvars.sh
  40. # test whether oneCCL is correctly installed
  41. #mpirun -n 2 ./examples/benchmark/benchmark
  42. - name: Install transformers
  43. run: |
  44. git clone https://github.com/huggingface/transformers
  45. cd transformers
  46. git rev-parse --short HEAD
  47. pip install .
  48. - name: Install deepspeed
  49. run: |
  50. # check why the host does not have AVX2 support
  51. pip install .[dev,1bit,autotuning,inf]
  52. ds_report
  53. - name: Python environment
  54. run: |
  55. pip list
  56. - name: Unit tests
  57. run: |
  58. source oneCCL/build/_install/env/setvars.sh
  59. unset TORCH_CUDA_ARCH_LIST # only jit compile for current arch
  60. cd tests
  61. TRANSFORMERS_CACHE=~/tmp/transformers_cache/ TORCH_EXTENSIONS_DIR=./torch-extensions pytest -m 'seq_inference' unit/
  62. TRANSFORMERS_CACHE=~/tmp/transformers_cache/ TORCH_EXTENSIONS_DIR=./torch-extensions pytest -m 'inference_ops' -m 'inference' unit/