nv-torch18-v100.yml

name: nv-torch18-v100

on:
  push:
    branches:
      - 'master'
      - 'staging**'
    paths-ignore:
      - 'docs/**'
  pull_request:
    paths-ignore:
      - 'docs/**'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  unit-tests:
    runs-on: [self-hosted, nvidia, cu111, v100]

    steps:
      - uses: actions/checkout@v2

      - name: environment
        run: |
          nvidia-smi
          which python
          python --version
          which nvcc
          nvcc --version
          pip install --upgrade pip
          pip uninstall --yes torch torchvision
          pip install torch==1.8.2+cu111 torchvision==0.9.2+cu111 -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
          python -c "import torch; print('torch:', torch.__version__, torch)"
          python -c "import torch; print('CUDA available:', torch.cuda.is_available())"

      - name: Install transformers
        run: |
          git clone https://github.com/huggingface/transformers
          cd transformers
          # if needed, switch to the last known good SHA until transformers@master is fixed
          # git checkout 1cc453d33
          git rev-parse --short HEAD
          pip uninstall --yes transformers
          pip install .

      - name: Python environment
        run: |
          pip list

      - name: Install deepspeed
        run: |
          pip uninstall --yes deepspeed
          pip install .[dev,1bit,autotuning,sparse_attn]
          ds_report

      - name: Unit tests
        run: |
          unset TORCH_CUDA_ARCH_LIST # only jit compile for current arch
          if [[ -d ./torch-extensions ]]; then rm -rf ./torch-extensions; fi
          cd tests
          TORCH_EXTENSIONS_DIR=./torch-extensions pytest --color=yes --durations=0 --forked --verbose -n 4 unit/ --torch_ver="1.8" --cuda_ver="11.1"
          TORCH_EXTENSIONS_DIR=./torch-extensions pytest --color=yes --durations=0 --forked --verbose -m 'sequential' unit/ --torch_ver="1.8" --cuda_ver="11.1"
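
The unit-test job can be approximated outside the self-hosted runner for local debugging. The sketch below is an assumption-laden condensation of the steps above, not part of the workflow itself: it assumes a CUDA 11.1 machine, a DeepSpeed checkout as the current directory, and the same pip extras; versions and flags are copied from the workflow.

  # install the pinned PyTorch LTS build used by this CI job
  pip install torch==1.8.2+cu111 torchvision==0.9.2+cu111 -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
  # install DeepSpeed from source with the extras the workflow uses, then verify op compatibility
  pip install .[dev,1bit,autotuning,sparse_attn]
  ds_report
  # JIT-compile ops only for the local GPU architecture, mirroring the CI step
  unset TORCH_CUDA_ARCH_LIST
  cd tests
  TORCH_EXTENSIONS_DIR=./torch-extensions pytest --forked -n 4 unit/ --torch_ver="1.8" --cuda_ver="11.1"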