diff --git a/.github/workflows/nv-flash-attn.yml b/.github/workflows/nv-flash-attn.yml
index 08e57ea6f668..a689f64d9430 100644
--- a/.github/workflows/nv-flash-attn.yml
+++ b/.github/workflows/nv-flash-attn.yml
@@ -6,8 +6,6 @@ on:
     paths:
       - 'deepspeed/sequence/**'
       - '.github/workflows/nv-flash-attn.yml'
-  merge_group:
-    branches: [ master ]
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
@@ -24,35 +22,28 @@ jobs:
     steps:
       - uses: actions/checkout@v4
 
-      - id: setup-venv
-        uses: ./.github/workflows/setup-venv
-
-      - name: Install pytorch
+      - name: Check container state
         run: |
-          pip install -U --cache-dir $TORCH_CACHE torch torchvision --index-url https://download.pytorch.org/whl/cu121
+          ldd --version
+          nvcc --version
+          nvidia-smi
           python -c "import torch; print('torch:', torch.__version__, torch)"
           python -c "import torch; print('CUDA available:', torch.cuda.is_available())"
-
       - name: Install transformers
         run: |
-          git clone https://github.com/huggingface/transformers
+          git clone --depth=1 https://github.com/huggingface/transformers
           cd transformers
-          # if needed switch to the last known good SHA until transformers@master is fixed
-          # git checkout 1cc453d33
           git rev-parse --short HEAD
-          pip install .
-
+          python -m pip install .
       - name: Install deepspeed
         run: |
-          pip install .[dev,flash_attn]
+          python -m pip install .[dev,flash_attn]
           ds_report
-
       - name: Python environment
         run: |
-          pip list
-
+          python -m pip list
       - name: Unit tests
         run: |
           unset TORCH_CUDA_ARCH_LIST # only jit compile for current arch
           cd tests
-          pytest $PYTEST_OPTS --forked -n 4 unit/sequence_parallelism/test_ulysses.py --torch_ver="2.5" --cuda_ver="12.1"
+          python -m pytest --color=yes --durations=0 --verbose -rF unit/sequence_parallelism/test_ulysses.py --torch_ver="2.3" --cuda_ver="12"