diff --git a/kubernetes/kfserving/README.md b/kubernetes/kfserving/README.md
index 063ca62985..a66100c08a 100644
--- a/kubernetes/kfserving/README.md
+++ b/kubernetes/kfserving/README.md
@@ -6,21 +6,29 @@ Currently, KFServing supports the Inference API for all the existing models but
 
 ### Docker Image Building
 
-For CPU Image
+* To create a CPU based image
+
 ```
-docker build -t pytorch/torchserve-kfs:latest .
+./build_image.sh
 ```
 
-For GPU Image
+* To create a CPU based image with custom tag
+
 ```
-docker build --build-arg BASE_IMAGE=pytorch/torchserve:latest-gpu -t pytorch/torchserve-kfs:latest-gpu .
+./build_image.sh -t <repository>/<image>:<tag>
 ```
 
-Push image to repository
+* To create a GPU based image
+
 ```
-docker push pytorch/torchserve-kfs:latest
+./build_image.sh -g
 ```
 
+* To create a GPU based image with custom tag
+
+```
+./build_image.sh -g -t <repository>/<image>:<tag>
+```
 
 Individual Readmes for KFServing :
diff --git a/kubernetes/kfserving/build_image.sh b/kubernetes/kfserving/build_image.sh
new file mode 100755
index 0000000000..ea7b587327
--- /dev/null
+++ b/kubernetes/kfserving/build_image.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Build the TorchServe KFServing docker image (CPU by default, GPU with -g).
+# Usage: ./build_image.sh [-g|--gpu] [-t|--tag <repository>/<image>:<tag>]
+
+set -euo pipefail
+
+DOCKER_TAG="pytorch/torchserve-kfs:latest"
+BASE_IMAGE="pytorch/torchserve:latest"
+CUSTOM_TAG=false
+
+while [ "$#" -gt 0 ]; do
+  case "$1" in
+    -h|--help)
+      echo "options:"
+      echo "-h, --help show brief help"
+      echo "-g, --gpu specify for gpu build"
+      echo "-t, --tag specify tag name for docker image"
+      exit 0
+      ;;
+    -g|--gpu)
+      BASE_IMAGE="pytorch/torchserve:latest-gpu"
+      # Only apply the default gpu tag when the user has not given -t;
+      # this keeps "-t my:tag -g" from clobbering the custom tag.
+      if [ "$CUSTOM_TAG" = false ]; then
+        DOCKER_TAG="pytorch/torchserve-kfs:latest-gpu"
+      fi
+      shift
+      ;;
+    -t|--tag)
+      if [ "$#" -lt 2 ]; then
+        echo "error: $1 requires an argument" >&2
+        exit 1
+      fi
+      DOCKER_TAG="$2"
+      CUSTOM_TAG=true
+      shift 2
+      ;;
+    *)
+      echo "error: unknown option: $1" >&2
+      exit 1
+      ;;
+  esac
+done
+
+DOCKER_BUILDKIT=1 docker build --file Dockerfile --build-arg BASE_IMAGE="$BASE_IMAGE" -t "$DOCKER_TAG" .