# Source: GitHub Actions web UI, "Workflow file for this run"
# PR #2827 — "Enabling CI for AMD with new runner.."
name: Build and push docker image to internal registry
# Triggers: manual dispatch, pushes to main / v* tags, and PRs that touch
# build-relevant paths. (Paste had lost all indentation; structure restored.)
on:
  workflow_dispatch:
  push:
    branches:
      - 'main'
    tags:
      - 'v*'
  pull_request:
    # Only run for PRs that can affect the built images or the tests.
    paths:
      - ".github/workflows/build.yaml"
      - "integration-tests/**"
      - "server/**"
      - "proto/**"
      - "router/**"
      - "launcher/**"
      - "Cargo.lock"
      - "rust-toolchain.toml"
      - "Dockerfile"
      - "Dockerfile_amd"
      - "Dockerfile_intel"
    branches:
      - 'main'
jobs:
# NOTE(review): every job definition in the commented block below
# (build-and-push-image, integration-tests-cuda) is currently disabled.
# Any remaining active job must not `needs:` one of these disabled jobs,
# or the workflow file becomes invalid.
# build-and-push-image:
# concurrency:
# group: ${{ github.workflow }}-build-and-push-image-${{ matrix.name }}-${{ github.head_ref || github.run_id }}
# cancel-in-progress: true
# runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci]
# strategy:
# matrix:
# include:
# - name: "cuda"
# label: ""
# dockerfile: "Dockerfile"
# - name: "amd"
# label: "-rocm"
# dockerfile: "Dockerfile_amd"
# - name: "intel"
# label: "-intel"
# dockerfile: "Dockerfile_intel"
# permissions:
# contents: write
# packages: write
# # This is used to complete the identity challenge
# # with sigstore/fulcio when running outside of PRs.
# id-token: write
# security-events: write
# steps:
# - name: Checkout repository
# uses: actions/checkout@v3
# - name: Initialize Docker Buildx
# uses: docker/[email protected]
# with:
# install: true
# - name: Inject slug/short variables
# uses: rlespinasse/[email protected]
# - name: Tailscale
# uses: huggingface/tailscale-action@main
# with:
# authkey: ${{ secrets.TAILSCALE_AUTHKEY }}
# - name: Login to GitHub Container Registry
# if: github.event_name != 'pull_request'
# uses: docker/login-action@v2
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
# - name: Login to internal Container Registry
# uses: docker/[email protected]
# with:
# username: ${{ secrets.TAILSCALE_DOCKER_USERNAME }}
# password: ${{ secrets.TAILSCALE_DOCKER_PASSWORD }}
# registry: registry.internal.huggingface.tech
# - name: Login to Azure Container Registry
# if: github.event_name != 'pull_request'
# uses: docker/[email protected]
# with:
# username: ${{ secrets.AZURE_DOCKER_USERNAME }}
# password: ${{ secrets.AZURE_DOCKER_PASSWORD }}
# registry: db4c2190dd824d1f950f5d1555fbadf0.azurecr.io
# # If pull request
# - name: Extract metadata (tags, labels) for Docker
# if: ${{ github.event_name == 'pull_request' }}
# id: meta-pr
# uses: docker/[email protected]
# with:
# images: |
# registry.internal.huggingface.tech/api-inference/community/text-generation-inference
# tags: |
# type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}${{ matrix.label }}
# # If main, release or tag
# - name: Extract metadata (tags, labels) for Docker
# if: ${{ github.event_name != 'pull_request' }}
# id: meta
# uses: docker/[email protected]
# with:
# flavor: |
# latest=auto
# images: |
# registry.internal.huggingface.tech/api-inference/community/text-generation-inference
# ghcr.io/huggingface/text-generation-inference
# db4c2190dd824d1f950f5d1555fbadf0.azurecr.io/text-generation-inference
# tags: |
# type=semver,pattern={{version}}${{ matrix.label }}
# type=semver,pattern={{major}}.{{minor}}${{ matrix.label }}
# type=raw,value=latest${{ matrix.label }},enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }}
# type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}${{ matrix.label }}
# - name: Build and push Docker image
# id: build-and-push
# uses: docker/build-push-action@v4
# with:
# context: .
# file: ${{ matrix.dockerfile }}
# push: true
# platforms: 'linux/amd64'
# build-args: |
# GIT_SHA=${{ env.GITHUB_SHA }}
# DOCKER_LABEL=sha-${{ env.GITHUB_SHA_SHORT }}${{ matrix.label }}
# tags: ${{ steps.meta.outputs.tags || steps.meta-pr.outputs.tags }}
# labels: ${{ steps.meta.outputs.labels || steps.meta-pr.outputs.labels }}
# cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min
# cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min
# integration-tests-cuda:
# concurrency:
# group: ${{ github.workflow }}-${{ github.job }}-cuda-${{ github.head_ref || github.run_id }}
# cancel-in-progress: true
# runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci]
# needs: build-and-push-image
# steps:
# - name: Checkout repository
# uses: actions/checkout@v4
# - name: Inject slug/short variables
# uses: rlespinasse/[email protected]
# - name: Set up Python
# uses: actions/setup-python@v4
# with:
# python-version: 3.9
# - name: Install
# run: |
# make install-integration-tests
# - name: Run tests
# run: |
# export DOCKER_VOLUME=/mnt/cache
# export DOCKER_IMAGE=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ env.GITHUB_SHA_SHORT }}
# export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }}
# pytest -s -vv integration-tests
# The Actions annotation for this run reported (at the old Line 150, Col 3):
#   "Invalid workflow file: The workflow must contain at least one job
#    with no dependencies."
# Cause: this job declared `needs: build-and-push-image`, but that job is
# commented out above, so no dependency-free job existed. The `needs:` entry
# is removed below until the build job is re-enabled.
  integration-tests-rocm:
    concurrency:
      group: ${{ github.workflow }}-${{ github.job }}-rocm-${{ github.head_ref || github.run_id }}
      cancel-in-progress: true
    runs-on: [amd-gpu-tgi, multi-gpu, mi250]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Install Python 3.10
        uses: actions/setup-python@v5
        with:
          # Quoted: an unquoted 3.10 would parse as the float 3.1.
          python-version: '3.10'
      - name: Inject slug/short variables
        uses: rlespinasse/[email protected]
      - name: install deps
        run: |
          make install-integration-tests
      - name: ROCM-SMI
        run: |
          rocm-smi
      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"
      - name: Run tests
        run: |
          # Image tag is temporarily pinned to a known rocm build while
          # build-and-push-image is disabled (the original dynamic
          # sha-GITHUB_SHA_SHORT export was dead code, immediately
          # overridden by this pin). TODO: restore the dynamic tag when
          # the build job is re-enabled.
          export DOCKER_IMAGE=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-025f40a-rocm
          export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }}
          export DOCKER_DEVICES=/dev/kfd,/dev/dri
          python -m pytest -s -vv integration-tests/models/test_flash_gpt2.py