# nv-inference.yml

name: nv-inference

on:
  pull_request:
    paths-ignore:
      - 'docs/**'
      - 'blogs/**'
  merge_group:
    branches: [ master ]
  schedule:
    - cron: "0 0 * * *"
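
# One run per workflow+ref: a newer trigger for the same branch or PR
# cancels the run already in flight.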
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
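
# Self-hosted runner selected by labels; the names suggest an NVIDIA machine
# with CUDA 11.6 and a V100 GPU (label meanings are runner-specific).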
jobs:
  unit-tests:
    runs-on: [self-hosted, nvidia, cu116, v100]

    steps:
      - uses: actions/checkout@v3
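
      # Local composite action checked out with the repo; judging by its name,
      # it sets up the Python virtual environment on the self-hosted runner.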
      - id: setup-venv
        uses: ./.github/workflows/setup-venv
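
      # Pin torch to 1.13.1 built against CUDA 11.6 to match the runner.
      # $TORCH_CACHE is assumed to be provided by the environment (likely the
      # setup-venv action above); the two python -c lines log the installed
      # version and whether CUDA is visible.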
      - name: Install pytorch
        run: |
          pip install -U --cache-dir $TORCH_CACHE torch==1.13.1 torchvision --extra-index-url https://download.pytorch.org/whl/cu116
          python -c "import torch; print('torch:', torch.__version__, torch)"
          python -c "import torch; print('CUDA available:', torch.cuda.is_available())"
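
      # Build HuggingFace transformers from source at the tip of its default
      # branch; git rev-parse logs the exact commit so a failing run can be
      # reproduced later.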
      - name: Install transformers
        run: |
          git clone https://github.com/huggingface/transformers
          cd transformers
          git rev-parse --short HEAD
          pip install .
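
      # Install DeepSpeed from the checked-out repo with the extras these tests
      # exercise; ds_report prints the op compatibility/build status.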
      - name: Install deepspeed
        run: |
          pip install .[dev,1bit,autotuning,inf,triton,sd]
          ds_report

      - name: Python environment
        run: |
          pip list
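
      # Run the inference test groups under coverage: 'seq_inference' and
      # 'inference_ops' sequentially, then the larger 'inference' group forked
      # across 4 workers. TORCH_CUDA_ARCH_LIST is unset so JIT extensions build
      # only for the GPU actually present; $PYTEST_OPTS is assumed to come from
      # the runner environment (it is not defined in this file).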
      - name: Unit tests
        run: |
          unset TORCH_CUDA_ARCH_LIST # only jit compile for current arch
          cd tests
          coverage run --concurrency=multiprocessing -m pytest $PYTEST_OPTS -m 'seq_inference' unit/ --torch_ver="1.13" --cuda_ver="11.6"
          coverage run --concurrency=multiprocessing -m pytest $PYTEST_OPTS -m 'inference_ops' unit/ --torch_ver="1.13" --cuda_ver="11.6"
          coverage run --concurrency=multiprocessing -m pytest $PYTEST_OPTS --forked -n 4 -m 'inference' unit/ --torch_ver="1.13" --cuda_ver="11.6"
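
      # --concurrency=multiprocessing makes each worker process write its own
      # .coverage.* data file; combine merges them before the report.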
      - name: Coverage report
        run: |
          cd tests
          coverage combine
          coverage report -m