# nv-torch18-v100.yml — CI workflow: unit tests on a self-hosted NVIDIA V100
# runner with CUDA 11.1 and PyTorch 1.8 LTS. Installs transformers@master and
# this repo's package from source, then runs the pytest suite (parallel pass
# followed by the 'sequential'-marked tests).
name: nv-torch18-v100

on:
  push:
    branches:
      - 'master'
      - 'staging**'
    paths-ignore:
      - 'docs/**'
  pull_request:
    paths-ignore:
      - 'docs/**'

# Cancel superseded runs for the same ref to free the scarce V100 runner.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  unit-tests:
    runs-on: [self-hosted, nvidia, cu111, v100]

    steps:
      - uses: actions/checkout@v2

      # Report the node/toolchain versions, then pin torch/torchvision to the
      # 1.8 LTS + cu111 builds (uninstall first so the pin always takes effect).
      - name: environment
        run: |
          echo "JobID: $AISC_NODE_INSTANCE_ID"
          nvidia-smi
          which python
          python --version
          which nvcc
          nvcc --version
          pip install --upgrade pip
          pip uninstall --yes torch torchvision triton
          pip install torch==1.8.2+cu111 torchvision==0.9.2+cu111 -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
          python -c "import torch; print('torch:', torch.__version__, torch)"
          python -c "import torch; print('CUDA available:', torch.cuda.is_available())"

      # Install transformers from current master (source build).
      - name: Install transformers
        run: |
          git clone https://github.com/huggingface/transformers
          cd transformers
          # if needed switch to the last known good SHA until transformers@master is fixed
          # git checkout 1cc453d33
          git rev-parse --short HEAD
          pip uninstall --yes transformers
          pip install .

      - name: Python environment
        run: |
          pip list

      # Install this repo (deepspeed) from the checkout with test extras.
      - name: Install deepspeed
        run: |
          pip uninstall --yes deepspeed
          pip install .[dev,1bit,autotuning]
          ds_report

      - name: Unit tests
        run: |
          unset TORCH_CUDA_ARCH_LIST # only jit compile for current arch
          if [[ -d ./torch-extensions ]]; then rm -rf ./torch-extensions; fi
          cd tests
          TORCH_EXTENSIONS_DIR=./torch-extensions pytest --color=yes --durations=0 --forked --verbose -n 4 unit/ --torch_ver="1.8" --cuda_ver="11.1"
          TORCH_EXTENSIONS_DIR=./torch-extensions pytest --color=yes --durations=0 --forked --verbose -m 'sequential' unit/ --torch_ver="1.8" --cuda_ver="11.1"