From 13901368d48d4989377023fa2abf4e18a72e9f1f Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 5 Jun 2024 17:50:03 +0200 Subject: [PATCH 01/16] Internal runner ? --- .github/workflows/build.yaml | 110 ++--------------------------------- 1 file changed, 4 insertions(+), 106 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 432d20df3a5..718f9bc3be7 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -22,47 +22,11 @@ on: - 'main' jobs: - start-runner: - name: Start self-hosted EC2 runner - runs-on: ubuntu-latest - env: - AWS_REGION: us-east-1 - EC2_AMI_ID: ami-0789b6925c11b1fb2 - EC2_INSTANCE_TYPE: g5.12xlarge - EC2_SUBNET_ID: subnet-931b34f5,subnet-ecb993cd,subnet-943dc2d8,subnet-45371f1a,subnet-ee93e0df,subnet-fddc3dfc - EC2_SECURITY_GROUP: sg-030175c435ac141d6 - outputs: - label: ${{ steps.start-ec2-runner.outputs.label }} - ec2-instance-id: ${{ steps.start-ec2-runner.outputs.ec2-instance-id }} - steps: - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: ${{ env.AWS_REGION }} - - name: Start EC2 runner - id: start-ec2-runner - uses: philschmid/philschmid-ec2-github-runner@main - with: - mode: start - github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - ec2-image-id: ${{ env.EC2_AMI_ID }} - ec2-instance-type: ${{ env.EC2_INSTANCE_TYPE }} - subnet-id: ${{ env.EC2_SUBNET_ID }} - security-group-id: ${{ env.EC2_SECURITY_GROUP }} - aws-resource-tags: > # optional, requires additional permissions - [ - {"Key": "Name", "Value": "ec2-tgi-github-runner"}, - {"Key": "GitHubRepository", "Value": "${{ github.repository }}"} - ] - build-and-push-image: concurrency: group: ${{ github.workflow }}-build-and-push-image-${{ github.head_ref || github.run_id }} cancel-in-progress: true - needs: start-runner # required to start the main job when the runner is ready - runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner + runs-on: [self-hosted, nvidia-gpu , multi-gpu, a10, ci] permissions: contents: write packages: write @@ -151,9 +115,8 @@ jobs: group: ${{ github.workflow }}-${{ github.job }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true needs: - - start-runner - build-and-push-image # Wait for the docker image to be built - runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner + runs-on: [self-hosted, nvidia-gpu , multi-gpu, a10, ci] env: DOCKER_VOLUME: /cache steps: @@ -187,10 +150,9 @@ jobs: group: ${{ github.workflow }}-build-and-push-image-rocm-${{ github.head_ref || github.run_id }} cancel-in-progress: true needs: - - start-runner - build-and-push-image # Wait for the main docker image to be built - integration-tests # Wait for the main integration-tests - runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner + runs-on: [self-hosted, nvidia-gpu , multi-gpu, a10, ci] permissions: contents: write packages: write @@ -279,10 +241,9 @@ jobs: group: ${{ github.workflow }}-build-and-push-image-intel-${{ github.head_ref || github.run_id }} cancel-in-progress: true needs: - - start-runner - build-and-push-image # Wait for the main docker image to be built - integration-tests # Wait for the main integration-tests - runs-on: ${{ needs.start-runner.outputs.label }} # run the job on the newly created runner + runs-on: [self-hosted, nvidia-gpu 
, multi-gpu, a10, ci] permissions: contents: write packages: write @@ -368,66 +329,3 @@ jobs: labels: ${{ steps.meta.outputs.labels || steps.meta-pr.outputs.labels }} cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache-intel,mode=min cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache-intel,mode=min - - stop-runner: - name: Stop self-hosted EC2 runner - needs: - - start-runner - - build-and-push-image - - build-and-push-image-rocm - - build-and-push-image-intel - - integration-tests - runs-on: ubuntu-latest - env: - AWS_REGION: us-east-1 - if: ${{ always() }} # required to stop the runner even if the error happened in the previous jobs - steps: - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: ${{ env.AWS_REGION }} - - name: Stop EC2 runner - uses: philschmid/philschmid-ec2-github-runner@main - with: - mode: stop - github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - label: ${{ needs.start-runner.outputs.label }} - ec2-instance-id: ${{ needs.start-runner.outputs.ec2-instance-id }} - - # TODO: Move this to `build_amd.yml` (and `build_nvidia.yml`) - - # integration-tests-rocm: - # concurrency: - # group: ${{ github.workflow }}-${{ github.job }}-${{ github.head_ref || github.run_id }} - # cancel-in-progress: true - # needs: - # - start-runner - # - build-and-push-image - # - integration-tests - # - build-and-push-image-rocm - # - stop-runner - # runs-on: [self-hosted, amd-gpu, multi-gpu, mi300] - # container: - # image: registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ needs.build-and-push-image-rocm.outputs.short_sha }}-rocm - # options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/cache - # env: - # DOCKER_VOLUME: /cache - # steps: - # - name: ROCM-SMI - # run: | - # rocm-smi - # - name: ROCM-INFO - # run: | - # rocminfo | grep "Agent" -A 14 - # - name: Show ROCR environment - # run: | - # echo "ROCR: $ROCR_VISIBLE_DEVICES" - # - name: Install - # run: | - # make install-integration-tests - # - name: Run tests - # run: | - # export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }} - # pytest -s -vv integration-tests From 051b55f3cce6051a5a13ffbe55c9efc5a56c2a1c Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 5 Jun 2024 18:16:05 +0200 Subject: [PATCH 02/16] Fix. --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 718f9bc3be7..d7dc5d47848 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -26,7 +26,7 @@ jobs: concurrency: group: ${{ github.workflow }}-build-and-push-image-${{ github.head_ref || github.run_id }} cancel-in-progress: true - runs-on: [self-hosted, nvidia-gpu , multi-gpu, a10, ci] + runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] permissions: contents: write packages: write From 15d953afcbda3fff2511f42a939de6aeca2133bb Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 5 Jun 2024 18:20:11 +0200 Subject: [PATCH 03/16] HF tailscale version. 
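Switch the Tailscale step from the pinned upstream tailscale/github-action to the
Hugging Face fork. The step keeps the same shape and secret; only the action
reference changes (taken from the diff below):

    - name: Tailscale
      uses: huggingface/tailscale-action@main
      with:
        authkey: ${{ secrets.TAILSCALE_AUTHKEY }}

Presumably the fork exposes the same authkey input as upstream, so no other
inputs need to change.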
--- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index d7dc5d47848..f8cb7196052 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -44,7 +44,7 @@ jobs: - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4.4.1 - name: Tailscale - uses: tailscale/github-action@7bd8039bf25c23c4ab1b8d6e2cc2da2280601966 + uses: huggingface/tailscale-action@main with: authkey: ${{ secrets.TAILSCALE_AUTHKEY }} - name: Login to GitHub Container Registry From 717bafe6efe76f34100f61e2605b9bcd64482a87 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 5 Jun 2024 18:55:08 +0200 Subject: [PATCH 04/16] Everyone gets a fix. --- .github/workflows/build.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index f8cb7196052..64df9def1a5 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -116,7 +116,7 @@ jobs: cancel-in-progress: true needs: - build-and-push-image # Wait for the docker image to be built - runs-on: [self-hosted, nvidia-gpu , multi-gpu, a10, ci] + runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] env: DOCKER_VOLUME: /cache steps: @@ -152,7 +152,7 @@ jobs: needs: - build-and-push-image # Wait for the main docker image to be built - integration-tests # Wait for the main integration-tests - runs-on: [self-hosted, nvidia-gpu , multi-gpu, a10, ci] + runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] permissions: contents: write packages: write @@ -243,7 +243,7 @@ jobs: needs: - build-and-push-image # Wait for the main docker image to be built - integration-tests # Wait for the main integration-tests - runs-on: [self-hosted, nvidia-gpu , multi-gpu, a10, ci] + runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] permissions: contents: write packages: write From 7b55fd72c368abe3d17ddc80de9407d261a26664 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 5 Jun 2024 19:31:08 +0200 Subject: [PATCH 05/16] Tailscale for everyone. --- .github/workflows/build.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 64df9def1a5..6a9325a4161 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -128,7 +128,7 @@ jobs: with: python-version: 3.9 - name: Tailscale - uses: tailscale/github-action@7bd8039bf25c23c4ab1b8d6e2cc2da2280601966 + uses: huggingface/tailscale-action@main with: authkey: ${{ secrets.TAILSCALE_AUTHKEY }} - name: Prepare disks @@ -170,7 +170,7 @@ jobs: - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4.4.1 - name: Tailscale - uses: tailscale/github-action@7bd8039bf25c23c4ab1b8d6e2cc2da2280601966 + uses: huggingface/tailscale-action@main with: authkey: ${{ secrets.TAILSCALE_AUTHKEY }} - name: Login to GitHub Container Registry @@ -264,7 +264,7 @@ jobs: - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4.4.1 - name: Tailscale - uses: tailscale/github-action@7bd8039bf25c23c4ab1b8d6e2cc2da2280601966 + uses: huggingface/tailscale-action@main with: authkey: ${{ secrets.TAILSCALE_AUTHKEY }} - name: Login to GitHub Container Registry From d7ac081b6219bfc635eebeabacf86282d12df36a Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 5 Jun 2024 19:44:50 +0200 Subject: [PATCH 06/16] No nvme it seems. 
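The self-hosted runners apparently do not expose a raw NVMe device, so drop the
EC2-era disk preparation step. For reference, the removed step was:

    - name: Prepare disks
      run: |
        sudo mkfs -t ext4 /dev/nvme1n1
        sudo mkdir ${{ env.DOCKER_VOLUME }}
        sudo mount /dev/nvme1n1 ${{ env.DOCKER_VOLUME }}

The cache volume is assumed to already exist on the runner; a later commit in
this series points DOCKER_VOLUME at the shared /mnt/cache mount instead.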
--- .github/workflows/build.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 6a9325a4161..7aaf05cea28 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -131,11 +131,6 @@ jobs: uses: huggingface/tailscale-action@main with: authkey: ${{ secrets.TAILSCALE_AUTHKEY }} - - name: Prepare disks - run: | - sudo mkfs -t ext4 /dev/nvme1n1 - sudo mkdir ${{ env.DOCKER_VOLUME }} - sudo mount /dev/nvme1n1 ${{ env.DOCKER_VOLUME }} - name: Install run: | make install-integration-tests From 97e91e9d9f8c44dca21d2cc904c86f8b2cc74a20 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 5 Jun 2024 23:04:22 +0200 Subject: [PATCH 07/16] Using the common cache. --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 7aaf05cea28..9aaed06136c 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -118,7 +118,7 @@ jobs: - build-and-push-image # Wait for the docker image to be built runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] env: - DOCKER_VOLUME: /cache + DOCKER_VOLUME: /mnt/cache steps: - uses: actions/checkout@v2 - name: Inject slug/short variables From 586d2fbb8ac06d76b0dd490878a246cebbfd733f Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 6 Jun 2024 09:17:12 +0200 Subject: [PATCH 08/16] Fusing and parallelizing builds. --- .github/workflows/build.yaml | 235 ++++------------------------------- 1 file changed, 22 insertions(+), 213 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 9aaed06136c..856ea6dd860 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -18,15 +18,23 @@ on: - "Cargo.lock" - "rust-toolchain.toml" - "Dockerfile" + - "Dockerfile_amd" + - "Dockerfile_intel" branches: - 'main' jobs: build-and-push-image: concurrency: - group: ${{ github.workflow }}-build-and-push-image-${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-build-and-push-image-${{ matrix.name }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] + strategy: + matrix: [ + {name: "cuda", label: "", dockerfile: "Dockerfile"}, + {name: "amd", label: "-rocm", dockerfile: "Dockerfile_amd"}, + {name: "intel", label: "-intel", dockerfile: "Dockerfile_intel"} + ] permissions: contents: write packages: write @@ -76,7 +84,7 @@ jobs: images: | registry.internal.huggingface.tech/api-inference/community/text-generation-inference tags: | - type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }} + type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}${{ matrix.label }} # If main, release or tag - name: Extract metadata (tags, labels) for Docker if: ${{ github.event_name != 'pull_request' }} @@ -90,237 +98,38 @@ jobs: ghcr.io/huggingface/text-generation-inference db4c2190dd824d1f950f5d1555fbadf0.azurecr.io/text-generation-inference tags: | - type=semver,pattern={{version}} - type=semver,pattern={{major}}.{{minor}} - type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }} - type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }} + type=semver,pattern={{version}}${{ matrix.label }} + type=semver,pattern={{major}}.{{minor}}${{ matrix.label }} + type=raw,value=latest${{ matrix.label }},enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }} + type=raw,value=sha-${{ 
env.GITHUB_SHA_SHORT }}${{ matrix.label }} - name: Build and push Docker image id: build-and-push uses: docker/build-push-action@v4 with: context: . - file: Dockerfile + file: ${{ matrix.dockerfile }} push: true platforms: 'linux/amd64' build-args: | GIT_SHA=${{ env.GITHUB_SHA }} - DOCKER_LABEL=sha-${{ env.GITHUB_SHA_SHORT }} + DOCKER_LABEL=sha-${{ env.GITHUB_SHA_SHORT }}${{ matrix.label }} tags: ${{ steps.meta.outputs.tags || steps.meta-pr.outputs.tags }} labels: ${{ steps.meta.outputs.labels || steps.meta-pr.outputs.labels }} - cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache,mode=min - cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache,mode=min - - integration-tests: - concurrency: - group: ${{ github.workflow }}-${{ github.job }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - needs: - - build-and-push-image # Wait for the docker image to be built - runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] - env: - DOCKER_VOLUME: /mnt/cache - steps: - - uses: actions/checkout@v2 - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4.4.1 + cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min + cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min - name: Set up Python + if: ${{ matrix.name }} == "cuda" uses: actions/setup-python@v4 with: python-version: 3.9 - - name: Tailscale - uses: huggingface/tailscale-action@main - with: - authkey: ${{ secrets.TAILSCALE_AUTHKEY }} - name: Install + if: ${{ matrix.name }} == "cuda" run: | make install-integration-tests - name: Run tests + if: ${{ matrix.name }} == "cuda" run: | + export DOCKER_VOLUME=/mnt/cache export DOCKER_IMAGE=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ env.GITHUB_SHA_SHORT }} export HUGGING_FACE_HUB_TOKEN=${{ secrets.HUGGING_FACE_HUB_TOKEN }} pytest -s -vv integration-tests - - build-and-push-image-rocm: - concurrency: - group: ${{ github.workflow }}-build-and-push-image-rocm-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - needs: - - build-and-push-image # Wait for the main docker image to be built - - integration-tests # Wait for the main integration-tests - runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] - permissions: - contents: write - packages: write - # This is used to complete the identity challenge - # with sigstore/fulcio when running outside of PRs. 
- id-token: write - security-events: write - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - name: Initialize Docker Buildx - uses: docker/setup-buildx-action@v2.0.0 - with: - install: true - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4.4.1 - - name: Tailscale - uses: huggingface/tailscale-action@main - with: - authkey: ${{ secrets.TAILSCALE_AUTHKEY }} - - name: Login to GitHub Container Registry - if: github.event_name != 'pull_request' - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Login to internal Container Registry - uses: docker/login-action@v2.1.0 - with: - username: ${{ secrets.TAILSCALE_DOCKER_USERNAME }} - password: ${{ secrets.TAILSCALE_DOCKER_PASSWORD }} - registry: registry.internal.huggingface.tech - - name: Login to Azure Container Registry - if: github.event_name != 'pull_request' - uses: docker/login-action@v2.1.0 - with: - username: ${{ secrets.AZURE_DOCKER_USERNAME }} - password: ${{ secrets.AZURE_DOCKER_PASSWORD }} - registry: db4c2190dd824d1f950f5d1555fbadf0.azurecr.io - # If pull request - - name: Extract metadata (tags, labels) for Docker - if: ${{ github.event_name == 'pull_request' }} - id: meta-pr - uses: docker/metadata-action@v4.3.0 - with: - images: | - registry.internal.huggingface.tech/api-inference/community/text-generation-inference - tags: | - type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}-rocm - # If main, release or tag - - name: Extract metadata (tags, labels) for Docker - if: ${{ github.event_name != 'pull_request' }} - id: meta - uses: docker/metadata-action@v4.3.0 - with: - flavor: | - latest=false - images: | - registry.internal.huggingface.tech/api-inference/community/text-generation-inference - ghcr.io/huggingface/text-generation-inference - db4c2190dd824d1f950f5d1555fbadf0.azurecr.io/text-generation-inference - tags: | - type=semver,pattern={{version}}-rocm - type=semver,pattern={{major}}.{{minor}}-rocm - type=raw,value=latest-rocm,enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }} - type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}-rocm - - name: Build and push Docker image - id: build-and-push - uses: docker/build-push-action@v4 - with: - context: . - file: Dockerfile_amd - push: true - platforms: 'linux/amd64' - build-args: | - GIT_SHA=${{ env.GITHUB_SHA }} - DOCKER_LABEL=sha-${{ env.GITHUB_SHA_SHORT }}-rocm - tags: ${{ steps.meta.outputs.tags || steps.meta-pr.outputs.tags }} - labels: ${{ steps.meta.outputs.labels || steps.meta-pr.outputs.labels }} - cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache-rocm,mode=min - cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache-rocm,mode=min - - build-and-push-image-intel: - concurrency: - group: ${{ github.workflow }}-build-and-push-image-intel-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - needs: - - build-and-push-image # Wait for the main docker image to be built - - integration-tests # Wait for the main integration-tests - runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] - permissions: - contents: write - packages: write - # This is used to complete the identity challenge - # with sigstore/fulcio when running outside of PRs. 
- id-token: write - security-events: write - outputs: - # env is not available in the later `container:`, but previous job outputs are. - short_sha: ${{ env.GITHUB_SHA_SHORT }} - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - name: Initialize Docker Buildx - uses: docker/setup-buildx-action@v2.0.0 - with: - install: true - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4.4.1 - - name: Tailscale - uses: huggingface/tailscale-action@main - with: - authkey: ${{ secrets.TAILSCALE_AUTHKEY }} - - name: Login to GitHub Container Registry - if: github.event_name != 'pull_request' - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Login to internal Container Registry - uses: docker/login-action@v2.1.0 - with: - username: ${{ secrets.TAILSCALE_DOCKER_USERNAME }} - password: ${{ secrets.TAILSCALE_DOCKER_PASSWORD }} - registry: registry.internal.huggingface.tech - - name: Login to Azure Container Registry - if: github.event_name != 'pull_request' - uses: docker/login-action@v2.1.0 - with: - username: ${{ secrets.AZURE_DOCKER_USERNAME }} - password: ${{ secrets.AZURE_DOCKER_PASSWORD }} - registry: db4c2190dd824d1f950f5d1555fbadf0.azurecr.io - # If pull request - - name: Extract metadata (tags, labels) for Docker - if: ${{ github.event_name == 'pull_request' }} - id: meta-pr - uses: docker/metadata-action@v4.3.0 - with: - images: | - registry.internal.huggingface.tech/api-inference/community/text-generation-inference - tags: | - type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}-intel - # If main, release or tag - - name: Extract metadata (tags, labels) for Docker - if: ${{ github.event_name != 'pull_request' }} - id: meta - uses: docker/metadata-action@v4.3.0 - with: - flavor: | - latest=false - images: | - registry.internal.huggingface.tech/api-inference/community/text-generation-inference - ghcr.io/huggingface/text-generation-inference - db4c2190dd824d1f950f5d1555fbadf0.azurecr.io/text-generation-inference - tags: | - type=semver,pattern={{version}}-intel - type=semver,pattern={{major}}.{{minor}}-intel - type=raw,value=latest-intel,enable=${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }} - type=raw,value=sha-${{ env.GITHUB_SHA_SHORT }}-intel - - name: Build and push Docker image - id: build-and-push - uses: docker/build-push-action@v4 - with: - context: . - file: Dockerfile_intel - push: true - platforms: 'linux/amd64' - build-args: | - GIT_SHA=${{ env.GITHUB_SHA }} - DOCKER_LABEL=sha-${{ env.GITHUB_SHA_SHORT }}-intel - tags: ${{ steps.meta.outputs.tags || steps.meta-pr.outputs.tags }} - labels: ${{ steps.meta.outputs.labels || steps.meta-pr.outputs.labels }} - cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache-intel,mode=min - cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache-intel,mode=min From 97aad8930f1e10f56b514a00347a3bf34369e917 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 6 Jun 2024 09:23:24 +0200 Subject: [PATCH 09/16] Fix yaml? 
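strategy.matrix must be a mapping, so the flow-style list of objects from the
previous commit is rejected by the workflow parser. The three build variants are
declared under matrix.include instead (as in the diff below):

    strategy:
      matrix:
        include:
          - name: "cuda"
            label: ""
            dockerfile: "Dockerfile"
          - name: "amd"
            label: "-rocm"
            dockerfile: "Dockerfile_amd"
          - name: "intel"
            label: "-intel"
            dockerfile: "Dockerfile_intel"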
--- .github/workflows/build.yaml | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 856ea6dd860..a24e85006d6 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -30,11 +30,17 @@ jobs: cancel-in-progress: true runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] strategy: - matrix: [ - {name: "cuda", label: "", dockerfile: "Dockerfile"}, - {name: "amd", label: "-rocm", dockerfile: "Dockerfile_amd"}, - {name: "intel", label: "-intel", dockerfile: "Dockerfile_intel"} - ] + matrix: + include: + - name: "cuda" + label: "" + dockerfile: "Dockerfile" + - name: "amd" + label: "-rocm" + dockerfile: "Dockerfile_amd" + - name: "intel" + label: "-intel" + dockerfile: "Dockerfile_intel" permissions: contents: write packages: write From a00d00a6e81d520615c7bebf5a0afab59ff8e0ca Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 6 Jun 2024 09:46:51 +0200 Subject: [PATCH 10/16] If conditional wrong ? --- .github/workflows/build.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index a24e85006d6..9bf55016fe0 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -124,16 +124,16 @@ jobs: cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min - name: Set up Python - if: ${{ matrix.name }} == "cuda" + if: matrix.name == "cuda" uses: actions/setup-python@v4 with: python-version: 3.9 - name: Install - if: ${{ matrix.name }} == "cuda" + if: matrix.name == "cuda" run: | make install-integration-tests - name: Run tests - if: ${{ matrix.name }} == "cuda" + if: matrix.name == "cuda" run: | export DOCKER_VOLUME=/mnt/cache export DOCKER_IMAGE=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ env.GITHUB_SHA_SHORT }} From 3d300960563d0e610bc592c2c91c22e97caa0cb9 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 6 Jun 2024 09:48:20 +0200 Subject: [PATCH 11/16] single quote. 
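GitHub expression syntax only accepts single-quoted string literals. With the
${{ }} wrapper removed in the previous commit, the if: condition is parsed as an
expression, and the remaining double quotes make it fail. Single quotes fix it
(as in the diff below):

    - name: Install
      if: matrix.name == 'cuda'
      run: |
        make install-integration-tests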
--- .github/workflows/build.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 9bf55016fe0..84266ce5206 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -124,16 +124,16 @@ jobs: cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min - name: Set up Python - if: matrix.name == "cuda" + if: matrix.name == 'cuda' uses: actions/setup-python@v4 with: python-version: 3.9 - name: Install - if: matrix.name == "cuda" + if: matrix.name == 'cuda' run: | make install-integration-tests - name: Run tests - if: matrix.name == "cuda" + if: matrix.name == 'cuda' run: | export DOCKER_VOLUME=/mnt/cache export DOCKER_IMAGE=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:sha-${{ env.GITHUB_SHA_SHORT }} From 1ac7a112fe7bc6dc5d34aeb2fda56ce98bdfd5c4 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 6 Jun 2024 10:34:25 +0200 Subject: [PATCH 12/16] Less cache misses ? --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 659e2673680..0cffda4c75c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,9 +15,6 @@ RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder -ARG GIT_SHA -ARG DOCKER_LABEL - RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \ unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \ @@ -27,6 +24,9 @@ RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \ COPY --from=planner /usr/src/recipe.json recipe.json RUN cargo chef cook --profile release-opt --recipe-path recipe.json +ARG GIT_SHA +ARG DOCKER_LABEL + COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto From 1c4c4d6aed627bf71b54629ec359fcf2345a9eaf Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 6 Jun 2024 13:51:40 +0200 Subject: [PATCH 13/16] Micro optimization. --- Dockerfile | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0cffda4c75c..b029f0fff2a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -225,6 +225,14 @@ RUN cd server && \ pip install -r requirements_cuda.txt && \ pip install ".[bnb, accelerate, quantize, peft, outlines]" --no-cache-dir +# Deps before the binaries +# The binaries change on every build given we burn the SHA into them +# The deps change less often. 
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + build-essential \ + g++ \ + && rm -rf /var/lib/apt/lists/* + # Install benchmarker COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark # Install router @@ -232,10 +240,6 @@ COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/loca # Install launcher COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - build-essential \ - g++ \ - && rm -rf /var/lib/apt/lists/* # AWS Sagemaker compatible image FROM base as sagemaker From 5d16af6d3585632d98116c7c0c2f92fb4754c796 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 6 Jun 2024 14:08:06 +0200 Subject: [PATCH 14/16] Fixing the arg dependency on AMD/Intel. --- Dockerfile_amd | 6 +++--- Dockerfile_intel | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Dockerfile_amd b/Dockerfile_amd index b0d181ea1a9..c79bc03c5b3 100644 --- a/Dockerfile_amd +++ b/Dockerfile_amd @@ -15,9 +15,6 @@ RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder -ARG GIT_SHA -ARG DOCKER_LABEL - RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \ unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \ @@ -27,6 +24,9 @@ RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \ COPY --from=planner /usr/src/recipe.json recipe.json RUN cargo chef cook --profile release-opt --recipe-path recipe.json +ARG GIT_SHA +ARG DOCKER_LABEL + COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto diff --git a/Dockerfile_intel b/Dockerfile_intel index 0a7000036c1..ee96392894a 100644 --- a/Dockerfile_intel +++ b/Dockerfile_intel @@ -14,9 +14,6 @@ RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder -ARG GIT_SHA -ARG DOCKER_LABEL - RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \ unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \ @@ -26,6 +23,9 @@ RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \ COPY --from=planner /usr/src/recipe.json recipe.json RUN cargo chef cook --profile release-opt --recipe-path recipe.json +ARG GIT_SHA +ARG DOCKER_LABEL + COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto From ab7578b9c0bea1cd7c69d7c83b2e4e778d99df78 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 6 Jun 2024 14:23:05 +0200 Subject: [PATCH 15/16] Using CPU to build the images (caveat: Waiting on all 3 builds before integration tests). 
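Move the image builds to CPU-only self-hosted runners and split the CUDA
integration tests back out into their own GPU job. Because the new job depends
on the whole build-and-push-image matrix, it only starts once the CUDA, ROCm and
Intel images have all been pushed, which is the caveat above. Roughly (from the
diff below):

    integration-tests:
      runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci]
      needs: build-and-push-image
      steps:
        ...

The follow-up commit reverts this change.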
--- .github/workflows/build.yaml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 84266ce5206..d28ba29a2d4 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -28,9 +28,9 @@ jobs: concurrency: group: ${{ github.workflow }}-build-and-push-image-${{ matrix.name }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true - runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] + runs-on: [self-hosted, cpu] strategy: - matrix: + matrix: include: - name: "cuda" label: "" @@ -123,6 +123,13 @@ jobs: labels: ${{ steps.meta.outputs.labels || steps.meta-pr.outputs.labels }} cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min + integration-tests: + concurrency: + group: ${{ github.workflow }}-build-and-push-image-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] + needs: build-and-push-image + steps: - name: Set up Python if: matrix.name == 'cuda' uses: actions/setup-python@v4 From ff7ebf4acaf765001a9b01a517fb3d3e841a7305 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 6 Jun 2024 14:34:50 +0200 Subject: [PATCH 16/16] Revert "Using CPU to build the images (caveat: Waiting on all 3 builds before" This reverts commit ab7578b9c0bea1cd7c69d7c83b2e4e778d99df78. --- .github/workflows/build.yaml | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index d28ba29a2d4..84266ce5206 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -28,9 +28,9 @@ jobs: concurrency: group: ${{ github.workflow }}-build-and-push-image-${{ matrix.name }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true - runs-on: [self-hosted, cpu] + runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] strategy: - matrix: + matrix: include: - name: "cuda" label: "" @@ -123,13 +123,6 @@ jobs: labels: ${{ steps.meta.outputs.labels || steps.meta-pr.outputs.labels }} cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min - integration-tests: - concurrency: - group: ${{ github.workflow }}-build-and-push-image-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] - needs: build-and-push-image - steps: - name: Set up Python if: matrix.name == 'cuda' uses: actions/setup-python@v4