From b3c1c0bc3ecb009d5bb4885b738c70ee58a2f1dc Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Sun, 5 Jan 2025 17:34:05 +0000 Subject: [PATCH] Update container images to use latest versions Update CUDA base image from 12.6.2 to 12.6.3 for both builder and runtime stages. Update UBI base image to 9.5. Update ROCm version to 6.3.1. Signed-off-by: Eric Curtin --- .github/workflows/ci-images.yml | 2 +- Makefile | 6 ++++++ container-images/asahi/Containerfile | 7 +------ container-images/cuda/Containerfile | 11 +++-------- container-images/ramalama/Containerfile | 9 ++------- container-images/rocm/Containerfile | 6 +----- container-images/rocm/amdgpu.repo | 2 +- container-images/rocm/rocm.repo | 2 +- .../scripts/build_llama_and_whisper.sh | 16 +++++++--------- container-images/vulkan/Containerfile | 3 +-- container_build.sh | 9 +++++++-- 11 files changed, 31 insertions(+), 42 deletions(-) diff --git a/.github/workflows/ci-images.yml b/.github/workflows/ci-images.yml index df9f3b41..c985f767 100644 --- a/.github/workflows/ci-images.yml +++ b/.github/workflows/ci-images.yml @@ -68,4 +68,4 @@ jobs: - name: Build Images run: | - make build + make build_rm diff --git a/Makefile b/Makefile index eefae658..137589f6 100644 --- a/Makefile +++ b/Makefile @@ -78,6 +78,12 @@ ifeq ($(OS),Linux) ./container_build.sh build $(IMAGE) endif +.PHONY: build_rm +build_rm: +ifeq ($(OS),Linux) + ./container_build.sh -r build $(IMAGE) +endif + .PHONY: install-docs install-docs: docs make -C docs install diff --git a/container-images/asahi/Containerfile b/container-images/asahi/Containerfile index d340e000..eab5a2ee 100644 --- a/container-images/asahi/Containerfile +++ b/container-images/asahi/Containerfile @@ -1,11 +1,6 @@ FROM fedora:41 -ARG LLAMA_CPP_SHA=0827b2c1da299805288abbd556d869318f2b121e -# renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest -ARG 
WHISPER_CPP_SHA=3de9deead5759eb038966990e3cb5d83984ae467 - COPY ../scripts /scripts RUN chmod +x /scripts/*.sh && \ - /scripts/build_llama_and_whisper.sh "asahi" "$LLAMA_CPP_SHA" \ - "$WHISPER_CPP_SHA" + /scripts/build_llama_and_whisper.sh "asahi" diff --git a/container-images/cuda/Containerfile b/container-images/cuda/Containerfile index 27eaa6af..32067d38 100644 --- a/container-images/cuda/Containerfile +++ b/container-images/cuda/Containerfile @@ -1,17 +1,12 @@ # Base image with CUDA for compilation -FROM docker.io/nvidia/cuda:12.6.2-devel-ubi9 AS builder - -ARG LLAMA_CPP_SHA=0827b2c1da299805288abbd556d869318f2b121e -# renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest -ARG WHISPER_CPP_SHA=3de9deead5759eb038966990e3cb5d83984ae467 +FROM docker.io/nvidia/cuda:12.6.3-devel-ubi9 AS builder COPY ../scripts /scripts RUN chmod +x /scripts/*.sh && \ - /scripts/build_llama_and_whisper.sh "cuda" "$LLAMA_CPP_SHA" \ - "$WHISPER_CPP_SHA" + /scripts/build_llama_and_whisper.sh "cuda" # Final runtime image -FROM docker.io/nvidia/cuda:12.6.2-runtime-ubi9 +FROM docker.io/nvidia/cuda:12.6.3-runtime-ubi9 RUN dnf install -y python3 && \ dnf clean all && rm -rf /var/cache/*dnf* diff --git a/container-images/ramalama/Containerfile b/container-images/ramalama/Containerfile index 73996cc1..64ac4251 100644 --- a/container-images/ramalama/Containerfile +++ b/container-images/ramalama/Containerfile @@ -1,13 +1,8 @@ -FROM registry.access.redhat.com/ubi9/ubi:9.4-1214.1729773476 - -ARG LLAMA_CPP_SHA=0827b2c1da299805288abbd556d869318f2b121e -# renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest -ARG WHISPER_CPP_SHA=3de9deead5759eb038966990e3cb5d83984ae467 +FROM registry.access.redhat.com/ubi9/ubi:9.5 COPY ../scripts /scripts RUN chmod +x /scripts/*.sh && \ - 
/scripts/build_llama_and_whisper.sh "ramalama" "$LLAMA_CPP_SHA" \ - "$WHISPER_CPP_SHA" + /scripts/build_llama_and_whisper.sh "ramalama" ENV WHISPER_CPP_SHA=${WHISPER_CPP_SHA} ENV LLAMA_CPP_SHA=${LLAMA_CPP_SHA} diff --git a/container-images/rocm/Containerfile b/container-images/rocm/Containerfile index 953b5d99..ba49c5a2 100644 --- a/container-images/rocm/Containerfile +++ b/container-images/rocm/Containerfile @@ -1,12 +1,8 @@ FROM quay.io/ramalama/ramalama:latest -ARG ROCM_VERSION=6.2.2 -ARG AMDGPU_VERSION=6.2.2 - COPY rocm/amdgpu.repo /etc/yum.repos.d/ COPY rocm/rocm.repo /etc/yum.repos.d/ COPY scripts /scripts RUN chmod +x /scripts/*.sh && \ - /scripts/build_llama_and_whisper.sh "rocm" "$LLAMA_CPP_SHA" \ - "$WHISPER_CPP_SHA" + /scripts/build_llama_and_whisper.sh "rocm" diff --git a/container-images/rocm/amdgpu.repo b/container-images/rocm/amdgpu.repo index 5e950ebe..8e5f28a3 100644 --- a/container-images/rocm/amdgpu.repo +++ b/container-images/rocm/amdgpu.repo @@ -1,6 +1,6 @@ [amdgpu] name=amdgpu -baseurl=https://repo.radeon.com/amdgpu/6.2.2/rhel/9.4/main/x86_64/ +baseurl=https://repo.radeon.com/amdgpu/6.3.1/rhel/9.5/main/x86_64/ enabled=1 priority=50 gpgcheck=1 diff --git a/container-images/rocm/rocm.repo b/container-images/rocm/rocm.repo index ce6e9823..bcc62e12 100644 --- a/container-images/rocm/rocm.repo +++ b/container-images/rocm/rocm.repo @@ -1,6 +1,6 @@ [ROCm] name=ROCm -baseurl=https://repo.radeon.com/rocm/rhel9/6.2.2/main +baseurl=https://repo.radeon.com/rocm/rhel9/6.3.1/main enabled=1 priority=50 gpgcheck=1 diff --git a/container-images/scripts/build_llama_and_whisper.sh b/container-images/scripts/build_llama_and_whisper.sh index 46991dab..853ff2b1 100644 --- a/container-images/scripts/build_llama_and_whisper.sh +++ b/container-images/scripts/build_llama_and_whisper.sh @@ -71,9 +71,9 @@ configure_common_flags() { } clone_and_build_whisper_cpp() { - local whisper_cpp_sha="$1" - local install_prefix="$2" - local whisper_flags=("${!3}") + local 
install_prefix="$1" + local whisper_flags=("${!2}") + local whisper_cpp_sha="3de9deead5759eb038966990e3cb5d83984ae467" whisper_flags+=("-DBUILD_SHARED_LIBS=NO") git clone https://github.com/ggerganov/whisper.cpp @@ -88,8 +88,8 @@ clone_and_build_whisper_cpp() { } clone_and_build_llama_cpp() { - local llama_cpp_sha="$1" - local common_flags=("${!2}") + local common_flags=("${!1}") + local llama_cpp_sha="0827b2c1da299805288abbd556d869318f2b121e" git clone https://github.com/ggerganov/llama.cpp cd llama.cpp @@ -103,8 +103,6 @@ main() { set -ex local containerfile="$1" - local llama_cpp_sha="$2" - local whisper_cpp_sha="$3" local install_prefix set_install_prefix local common_flags @@ -112,14 +110,14 @@ main() { common_flags+=("-DGGML_CCACHE=0" "-DCMAKE_INSTALL_PREFIX=$install_prefix") dnf_install - clone_and_build_whisper_cpp "$whisper_cpp_sha" "$install_prefix" common_flags[@] + clone_and_build_whisper_cpp "$install_prefix" common_flags[@] case "$containerfile" in ramalama) common_flags+=("-DGGML_KOMPUTE=ON" "-DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON") ;; esac - clone_and_build_llama_cpp "$llama_cpp_sha" common_flags[@] + clone_and_build_llama_cpp common_flags[@] dnf clean all rm -rf /var/cache/*dnf* /opt/rocm-*/lib/llvm \ /opt/rocm-*/lib/rocblas/library/*gfx9* llama.cpp whisper.cpp diff --git a/container-images/vulkan/Containerfile b/container-images/vulkan/Containerfile index dd910442..5ed08318 100644 --- a/container-images/vulkan/Containerfile +++ b/container-images/vulkan/Containerfile @@ -2,6 +2,5 @@ FROM quay.io/ramalama/ramalama:latest COPY ../scripts /scripts RUN chmod +x /scripts/*.sh && \ - /scripts/build_llama_and_whisper.sh "vulkan" "$LLAMA_CPP_SHA" \ - "$WHISPER_CPP_SHA" + /scripts/build_llama_and_whisper.sh "vulkan" diff --git a/container_build.sh b/container_build.sh index 07058277..8db8a74b 100755 --- a/container_build.sh +++ b/container_build.sh @@ -25,7 +25,7 @@ add_build_platform() { } rm_container_image() { - if [ "$image_name" == "cuda" ] || 
[ "$image_name" == "rocm" ]; then + if $rm_after_build; then "$conman_bin" rmi -f "$image_name" || true fi } @@ -35,7 +35,6 @@ build() { local image_name="${1//container-images\//}" local conman_build=("${conman[@]}") local conman_show_size=("${conman[@]}" "images" "--filter" "reference='quay.io/ramalama/$image_name'") - if [ "$3" == "-d" ]; then add_build_platform echo "${conman_build[@]}" @@ -81,6 +80,10 @@ parse_arguments() { option="$1" shift ;; + -r) + rm_after_build="true" + shift + ;; build|push) command="$1" shift @@ -113,6 +116,7 @@ print_usage() { echo echo "Options:" echo " -d Some option description" + echo " -r Remove container image after build" echo echo "Targets:" echo " Specify the target container image to build or push" @@ -129,6 +133,7 @@ main() { local target="" local command="" local option="" + local rm_after_build="false" parse_arguments "$@"