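# CI workflow for the DeepSpeed inference unit-test suite: runs on pull requests,
# in the merge queue, and on a nightly schedule, using a self-hosted NVIDIA V100
# runner with CUDA 11.6 and torch 1.13.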
name: nv-inference
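
# Trigger on pull requests (skipping docs/blogs-only changes), on merge-queue runs
# for master, and on a nightly cron at midnight UTC.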
on:
  pull_request:
    paths-ignore:
      - 'docs/**'
      - 'blogs/**'
  merge_group:
    branches: [ master ]
  schedule:
    - cron: "0 0 * * *"
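
# Keep at most one active run per workflow/ref; newer runs cancel older in-progress ones.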
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
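
# Single job: install torch, transformers, and DeepSpeed, then run the inference-marked
# unit tests. The runner labels select a self-hosted machine with an NVIDIA V100 GPU
# and CUDA 11.6.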
jobs:
  unit-tests:
    runs-on: [self-hosted, nvidia, cu116, v100]

    steps:
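      # Check out the repository and create a Python virtual environment via the
      # repository's local setup-venv action.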
      - uses: actions/checkout@v3

      - id: setup-venv
        uses: ./.github/workflows/setup-venv
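
      # Pin torch 1.13.1 built against CUDA 11.6 and confirm the GPU is visible.
      # $TORCH_CACHE is assumed to point at a persistent pip cache on the runner.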
      - name: Install pytorch
        run: |
          pip install -U --cache-dir $TORCH_CACHE torch==1.13.1 torchvision --extra-index-url https://download.pytorch.org/whl/cu116
          python -c "import torch; print('torch:', torch.__version__, torch)"
          python -c "import torch; print('CUDA available:', torch.cuda.is_available())"
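
      # Install HuggingFace transformers from source at the tip of its default branch,
      # logging the exact commit under test.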
      - name: Install transformers
        run: |
          git clone https://github.com/huggingface/transformers
          cd transformers
          git rev-parse --short HEAD
          pip install .
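
      # Install DeepSpeed from the checked-out source with its dev/inference extras,
      # then print the DeepSpeed environment report.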
      - name: Install deepspeed
        run: |
          pip install .[dev,1bit,autotuning,inf,triton,sd]
          ds_report
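
      # Record the resolved package versions in the job log for debugging.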
      - name: Python environment
        run: |
          pip list
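
      # Run the inference-marked test suites under coverage, pinned to the runner's
      # torch/CUDA versions. $PYTEST_OPTS is assumed to be supplied by the runner
      # environment (e.g. via the setup-venv action).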
      - name: Unit tests
        run: |
          unset TORCH_CUDA_ARCH_LIST # only jit compile for current arch
          cd tests
          coverage run --concurrency=multiprocessing -m pytest $PYTEST_OPTS -m 'seq_inference' unit/ --torch_ver="1.13" --cuda_ver="11.6"
          coverage run --concurrency=multiprocessing -m pytest $PYTEST_OPTS -m 'inference_ops' unit/ --torch_ver="1.13" --cuda_ver="11.6"
          coverage run --concurrency=multiprocessing -m pytest $PYTEST_OPTS --forked -n 4 -m 'inference' unit/ --torch_ver="1.13" --cuda_ver="11.6"
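
      # Combine the per-process coverage data files and print a summary report.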
      - name: Coverage report
        run: |
          cd tests
          coverage combine
          coverage report -m