Skip to content

Commit

Permalink
Merge pull request #534 from containers/updates
Browse files Browse the repository at this point in the history
Update container images to use latest
  • Loading branch information
rhatdan authored Jan 6, 2025
2 parents 4a6ac32 + b3c1c0b commit 6c83731
Show file tree
Hide file tree
Showing 11 changed files with 31 additions and 42 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci-images.yml
Original file line number Diff line number Diff line change
Expand Up @@ -68,4 +68,4 @@ jobs:
- name: Build Images
run: |
make build
make build_rm
6 changes: 6 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,12 @@ ifeq ($(OS),Linux)
./container_build.sh build $(IMAGE)
endif

# Build the container image, then remove it after the build (-r) to free
# disk space — invoked by CI as `make build_rm` (.github/workflows/ci-images.yml).
.PHONY: build_rm
build_rm:
ifeq ($(OS),Linux)
	./container_build.sh -r build $(IMAGE)
endif

.PHONY: install-docs
install-docs: docs
make -C docs install
Expand Down
7 changes: 1 addition & 6 deletions container-images/asahi/Containerfile
Original file line number Diff line number Diff line change
@@ -1,11 +1,6 @@
FROM fedora:41

ARG LLAMA_CPP_SHA=0827b2c1da299805288abbd556d869318f2b121e
# renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest
ARG WHISPER_CPP_SHA=3de9deead5759eb038966990e3cb5d83984ae467

COPY ../scripts /scripts
RUN chmod +x /scripts/*.sh && \
/scripts/build_llama_and_whisper.sh "asahi" "$LLAMA_CPP_SHA" \
"$WHISPER_CPP_SHA"
/scripts/build_llama_and_whisper.sh "asahi"

11 changes: 3 additions & 8 deletions container-images/cuda/Containerfile
Original file line number Diff line number Diff line change
@@ -1,17 +1,12 @@
# Base image with CUDA for compilation
FROM docker.io/nvidia/cuda:12.6.2-devel-ubi9 AS builder

ARG LLAMA_CPP_SHA=0827b2c1da299805288abbd556d869318f2b121e
# renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest
ARG WHISPER_CPP_SHA=3de9deead5759eb038966990e3cb5d83984ae467
FROM docker.io/nvidia/cuda:12.6.3-devel-ubi9 AS builder

COPY ../scripts /scripts
RUN chmod +x /scripts/*.sh && \
/scripts/build_llama_and_whisper.sh "cuda" "$LLAMA_CPP_SHA" \
"$WHISPER_CPP_SHA"
/scripts/build_llama_and_whisper.sh "cuda"

# Final runtime image
FROM docker.io/nvidia/cuda:12.6.2-runtime-ubi9
FROM docker.io/nvidia/cuda:12.6.3-runtime-ubi9

RUN dnf install -y python3 && \
dnf clean all && rm -rf /var/cache/*dnf*
Expand Down
9 changes: 2 additions & 7 deletions container-images/ramalama/Containerfile
Original file line number Diff line number Diff line change
@@ -1,13 +1,8 @@
FROM registry.access.redhat.com/ubi9/ubi:9.4-1214.1729773476

ARG LLAMA_CPP_SHA=0827b2c1da299805288abbd556d869318f2b121e
# renovate: datasource=git-refs depName=ggerganov/whisper.cpp packageName=https://github.com/ggerganov/whisper.cpp gitRef=master versioning=loose type=digest
ARG WHISPER_CPP_SHA=3de9deead5759eb038966990e3cb5d83984ae467
FROM registry.access.redhat.com/ubi9/ubi:9.5

COPY ../scripts /scripts
RUN chmod +x /scripts/*.sh && \
/scripts/build_llama_and_whisper.sh "ramalama" "$LLAMA_CPP_SHA" \
"$WHISPER_CPP_SHA"
/scripts/build_llama_and_whisper.sh "ramalama"

# NOTE(review): the LLAMA_CPP_SHA/WHISPER_CPP_SHA ARGs were removed from this
# Containerfile in this change (the SHAs now live in
# container-images/scripts/build_llama_and_whisper.sh), so both ENVs below
# expand to empty strings — confirm these lines should be dropped as well.
ENV WHISPER_CPP_SHA=${WHISPER_CPP_SHA}
ENV LLAMA_CPP_SHA=${LLAMA_CPP_SHA}
6 changes: 1 addition & 5 deletions container-images/rocm/Containerfile
Original file line number Diff line number Diff line change
@@ -1,12 +1,8 @@
FROM quay.io/ramalama/ramalama:latest

ARG ROCM_VERSION=6.2.2
ARG AMDGPU_VERSION=6.2.2

COPY rocm/amdgpu.repo /etc/yum.repos.d/
COPY rocm/rocm.repo /etc/yum.repos.d/
COPY scripts /scripts
RUN chmod +x /scripts/*.sh && \
/scripts/build_llama_and_whisper.sh "rocm" "$LLAMA_CPP_SHA" \
"$WHISPER_CPP_SHA"
/scripts/build_llama_and_whisper.sh "rocm"

2 changes: 1 addition & 1 deletion container-images/rocm/amdgpu.repo
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[amdgpu]
name=amdgpu
baseurl=https://repo.radeon.com/amdgpu/6.2.2/rhel/9.4/main/x86_64/
baseurl=https://repo.radeon.com/amdgpu/6.3.1/rhel/9.5/main/x86_64/
enabled=1
priority=50
gpgcheck=1
Expand Down
2 changes: 1 addition & 1 deletion container-images/rocm/rocm.repo
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[ROCm]
name=ROCm
baseurl=https://repo.radeon.com/rocm/rhel9/6.2.2/main
baseurl=https://repo.radeon.com/rocm/rhel9/6.3.1/main
enabled=1
priority=50
gpgcheck=1
Expand Down
16 changes: 7 additions & 9 deletions container-images/scripts/build_llama_and_whisper.sh
Original file line number Diff line number Diff line change
Expand Up @@ -71,9 +71,9 @@ configure_common_flags() {
}

clone_and_build_whisper_cpp() {
local whisper_cpp_sha="$1"
local install_prefix="$2"
local whisper_flags=("${!3}")
local install_prefix="$1"
local whisper_flags=("${!2}")
local whisper_cpp_sha="3de9deead5759eb038966990e3cb5d83984ae467"
whisper_flags+=("-DBUILD_SHARED_LIBS=NO")

git clone https://github.com/ggerganov/whisper.cpp
Expand All @@ -88,8 +88,8 @@ clone_and_build_whisper_cpp() {
}

clone_and_build_llama_cpp() {
local llama_cpp_sha="$1"
local common_flags=("${!2}")
local common_flags=("${!1}")
local llama_cpp_sha="0827b2c1da299805288abbd556d869318f2b121e"

git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp
Expand All @@ -103,23 +103,21 @@ main() {
set -ex

local containerfile="$1"
local llama_cpp_sha="$2"
local whisper_cpp_sha="$3"
local install_prefix
set_install_prefix
local common_flags
configure_common_flags "$containerfile" common_flags

common_flags+=("-DGGML_CCACHE=0" "-DCMAKE_INSTALL_PREFIX=$install_prefix")
dnf_install
clone_and_build_whisper_cpp "$whisper_cpp_sha" "$install_prefix" common_flags[@]
clone_and_build_whisper_cpp "$install_prefix" common_flags[@]
case "$containerfile" in
ramalama)
common_flags+=("-DGGML_KOMPUTE=ON" "-DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON")
;;
esac

clone_and_build_llama_cpp "$llama_cpp_sha" common_flags[@]
clone_and_build_llama_cpp common_flags[@]
dnf clean all
rm -rf /var/cache/*dnf* /opt/rocm-*/lib/llvm \
/opt/rocm-*/lib/rocblas/library/*gfx9* llama.cpp whisper.cpp
Expand Down
3 changes: 1 addition & 2 deletions container-images/vulkan/Containerfile
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,5 @@ FROM quay.io/ramalama/ramalama:latest

COPY ../scripts /scripts
RUN chmod +x /scripts/*.sh && \
/scripts/build_llama_and_whisper.sh "vulkan" "$LLAMA_CPP_SHA" \
"$WHISPER_CPP_SHA"
/scripts/build_llama_and_whisper.sh "vulkan"

9 changes: 7 additions & 2 deletions container_build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ add_build_platform() {
}

# Remove the just-built image when the -r flag was given ($rm_after_build),
# e.g. to reclaim disk space on CI runners. Removal failures are non-fatal.
# Reads globals: rm_after_build, conman_bin, image_name.
rm_container_image() {
  $rm_after_build || return 0
  "$conman_bin" rmi -f "$image_name" || true
}
Expand All @@ -35,7 +35,6 @@ build() {
local image_name="${1//container-images\//}"
local conman_build=("${conman[@]}")
local conman_show_size=("${conman[@]}" "images" "--filter" "reference='quay.io/ramalama/$image_name'")

if [ "$3" == "-d" ]; then
add_build_platform
echo "${conman_build[@]}"
Expand Down Expand Up @@ -81,6 +80,10 @@ parse_arguments() {
option="$1"
shift
;;
-r)
rm_after_build="true"
shift
;;
build|push)
command="$1"
shift
Expand Down Expand Up @@ -113,6 +116,7 @@ print_usage() {
echo
echo "Options:"
echo " -d Some option description"
echo " -r Remove container image after build"
echo
echo "Targets:"
echo " Specify the target container image to build or push"
Expand All @@ -129,6 +133,7 @@ main() {
local target=""
local command=""
local option=""
local rm_after_build="false"

parse_arguments "$@"

Expand Down

0 comments on commit 6c83731

Please sign in to comment.