From ec1f0ab45948604948b76beb51e249de75828de2 Mon Sep 17 00:00:00 2001 From: astafevav Date: Wed, 6 Nov 2024 20:02:22 +0700 Subject: [PATCH 1/5] Add compose deploy example for FaqGen on AMD ROCm Signed-off-by: astafevav --- FaqGen/docker_compose/amd/gpu/rocm/README.md | 114 ++++++++++++ .../docker_compose/amd/gpu/rocm/compose.yaml | 79 +++++++++ FaqGen/tests/test_compose_on_rocm.sh | 167 ++++++++++++++++++ 3 files changed, 360 insertions(+) create mode 100644 FaqGen/docker_compose/amd/gpu/rocm/README.md create mode 100644 FaqGen/docker_compose/amd/gpu/rocm/compose.yaml create mode 100644 FaqGen/tests/test_compose_on_rocm.sh diff --git a/FaqGen/docker_compose/amd/gpu/rocm/README.md b/FaqGen/docker_compose/amd/gpu/rocm/README.md new file mode 100644 index 000000000..f28ba9589 --- /dev/null +++ b/FaqGen/docker_compose/amd/gpu/rocm/README.md @@ -0,0 +1,114 @@ +## 🚀 Start Microservices and MegaService + +### Required Models + +We set default model as "meta-llama/Meta-Llama-3-8B-Instruct", change "LLM_MODEL_ID" in following Environment Variables setting if you want to use other models. + +If use gated models, you also need to provide [huggingface token](https://huggingface.co/docs/hub/security-tokens) to "HUGGINGFACEHUB_API_TOKEN" environment variable. + +### Setup Environment Variables + +Since the `compose.yaml` will consume some environment variables, you need to setup them in advance as below. + +```bash +export FAQGEN_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" +export HOST_IP=${your_no_proxy} +export FAQGEN_TGI_SERVICE_PORT=8008 +export FAQGEN_LLM_SERVER_PORT=9000 +export FAQGEN_HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token} +export FAQGEN_BACKEND_SERVER_PORT=8888 +export FAGGEN_UI_PORT=5173 +``` + +Note: Please replace with `host_ip` with your external IP address, do not use localhost. + +Note: In order to limit access to a subset of GPUs, please pass each device individually using one or more -device /dev/dri/rendered, where is the card index, starting from 128. (https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html#docker-restrict-gpus) + +Example for set isolation for 1 GPU + +``` + - /dev/dri/card0:/dev/dri/card0 + - /dev/dri/renderD128:/dev/dri/renderD128 +``` + +Example for set isolation for 2 GPUs + +``` + - /dev/dri/card0:/dev/dri/card0 + - /dev/dri/renderD128:/dev/dri/renderD128 + - /dev/dri/card0:/dev/dri/card0 + - /dev/dri/renderD129:/dev/dri/renderD129 +``` + +Pelase find more information about accessing and restricting AMD GPUs in the link (https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html#docker-restrict-gpus) + +### Start Microservice Docker Containers + +```bash +cd GenAIExamples/FaqGen/docker_compose/amd/gpu/rocm/ +docker compose up -d +``` + +### Validate Microservices + +1. TGI Service + + ```bash + curl http://${host_ip}:8008/generate \ + -X POST \ + -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' \ + -H 'Content-Type: application/json' + ``` + +2. LLM Microservice + + ```bash + curl http://${host_ip}:9000/v1/faqgen \ + -X POST \ + -d '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' \ + -H 'Content-Type: application/json' + ``` + +3. MegaService + + ```bash + curl http://${host_ip}:8888/v1/faqgen -H "Content-Type: application/json" -d '{ + "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." + }' + ``` + + Following the validation of all aforementioned microservices, we are now prepared to construct a mega-service. + +## 🚀 Launch the UI + +Open this URL `http://{host_ip}:5173` in your browser to access the frontend. + +![project-screenshot](../../../../assets/img/faqgen_ui_text.png) + +## 🚀 Launch the React UI (Optional) + +To access the FAQGen (react based) frontend, modify the UI service in the `compose.yaml` file. Replace `faqgen-rocm-ui-server` service with the `faqgen-rocm-react-ui-server` service as per the config below: + +```bash + faqgen-rocm-react-ui-server: + image: opea/faqgen-react-ui:latest + container_name: faqgen-rocm-react-ui-server + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + ports: + - 5174:80 + depends_on: + - faqgen-rocm-backend-server + ipc: host + restart: always +``` + +Open this URL `http://{host_ip}:5174` in your browser to access the react based frontend. + +- Create FAQs from Text input + ![project-screenshot](../../../../assets/img/faqgen_react_ui_text.png) + +- Create FAQs from Text Files + ![project-screenshot](../../../../assets/img/faqgen_react_ui_text_file.png) diff --git a/FaqGen/docker_compose/amd/gpu/rocm/compose.yaml b/FaqGen/docker_compose/amd/gpu/rocm/compose.yaml new file mode 100644 index 000000000..f60024e8a --- /dev/null +++ b/FaqGen/docker_compose/amd/gpu/rocm/compose.yaml @@ -0,0 +1,79 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +services: + faqgen-tgi-service: + image: ghcr.io/huggingface/text-generation-inference:2.3.1-rocm + container_name: faggen-tgi-service + ports: + - "${FAQGEN_TGI_SERVICE_PORT}:80" + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + TGI_LLM_ENDPOINT: "http://${HOST_IP}:${FAQGEN_TGI_SERVICE_PORT}" + HUGGINGFACEHUB_API_TOKEN: ${FAQGEN_HUGGINGFACEHUB_API_TOKEN} + HUGGING_FACE_HUB_TOKEN: ${FAQGEN_HUGGINGFACEHUB_API_TOKEN} + volumes: + - "./data:/data" + shm_size: 1g + devices: + - /dev/kfd:/dev/kfd + - /dev/dri/ + cap_add: + - SYS_PTRACE + group_add: + - video + security_opt: + - seccomp:unconfined + ipc: host + command: --model-id ${FAQGEN_LLM_MODEL_ID} + faqgen-llm-server: + image: ${REGISTRY:-opea}/llm-faqgen-tgi:${TAG:-latest} + container_name: faqgen-llm-server + depends_on: + - faqgen-tgi-service + ports: + - "${FAQGEN_LLM_SERVER_PORT}:9000" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + TGI_LLM_ENDPOINT: "http://${HOST_IP}:${FAQGEN_TGI_SERVICE_PORT}" + HUGGINGFACEHUB_API_TOKEN: ${FAQGEN_HUGGINGFACEHUB_API_TOKEN} + HUGGING_FACE_HUB_TOKEN: ${FAQGEN_HUGGINGFACEHUB_API_TOKEN} + restart: unless-stopped + faqgen-backend-server: + image: ${REGISTRY:-opea}/faqgen:${TAG:-latest} + container_name: faqgen-backend-server + depends_on: + - faqgen-tgi-service + - faqgen-llm-server + ports: + - "${FAQGEN_BACKEND_SERVER_PORT}:8888" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - MEGA_SERVICE_HOST_IP=${HOST_IP} + - LLM_SERVICE_HOST_IP=${HOST_IP} + ipc: host + restart: always + faqgen-ui-server: + image: ${REGISTRY:-opea}/faqgen-ui:${TAG:-latest} + container_name: faqgen-ui-server + depends_on: + - faqgen-backend-server + ports: + - "${FAGGEN_UI_PORT}:5173" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - DOC_BASE_URL="http://${HOST_IP}:${FAQGEN_BACKEND_SERVER_PORT}/v1/faqgen" + ipc: host + restart: always +networks: + default: + driver: bridge diff --git a/FaqGen/tests/test_compose_on_rocm.sh b/FaqGen/tests/test_compose_on_rocm.sh new file mode 100644 index 000000000..82d57b089 --- /dev/null +++ b/FaqGen/tests/test_compose_on_rocm.sh @@ -0,0 +1,167 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -xe +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" +ip_address=$(hostname -I | awk '{print $1}') + +function build_docker_images() { + cd $WORKPATH/docker_image_build + git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../ + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." + service_list="faqgen faqgen-ui llm-faqgen-tgi" + docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + + docker pull ghcr.io/huggingface/text-generation-inference:2.3.1-rocm + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker_compose/amd/gpu/rocm + + export FAQGEN_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" + export HOST_IP=${ip_address} + export FAQGEN_TGI_SERVICE_PORT=8008 + export FAQGEN_LLM_SERVER_PORT=9000 + export FAQGEN_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} + export FAQGEN_BACKEND_SERVER_PORT=8888 + export FAGGEN_UI_PORT=5173 + export TGI_LLM_ENDPOINT="http://${ip_address}:8008" + export MEGA_SERVICE_HOST_IP=${ip_address} + export LLM_SERVICE_HOST_IP=${ip_address} + export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/faqgen" + + sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env + + # Start Docker Containers + docker compose up -d > ${LOG_PATH}/start_services_with_compose.log + + n=0 + until [[ "$n" -ge 100 ]]; do + docker logs tgi-rocm-server > ${LOG_PATH}/tgi_service_start.log + if grep -q Connected ${LOG_PATH}/tgi_service_start.log; then + break + fi + sleep 5s + n=$((n+1)) + done +} + +function validate_services() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local INPUT_DATA="$5" + + local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + if [ "$HTTP_STATUS" -eq 200 ]; then + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + + local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) + + if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then + echo "[ $SERVICE_NAME ] Content is as expected." + else + echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + else + echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + sleep 1s +} + +function validate_microservices() { + # Check if the microservices are running correctly. + + # tgi for llm service + validate_services \ + "${ip_address}:8008/generate" \ + "generated_text" \ + "tgi-service" \ + "faqgen-tgi-service" \ + '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' + + # llm microservice + validate_services \ + "${ip_address}:9000/v1/faqgen" \ + "data: " \ + "llm" \ + "faqgen-llm-server" \ + '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' +} + +function validate_megaservice() { + # Curl the Mega Service + validate_services \ + "${ip_address}:8888/v1/faqgen" \ + "Text Embeddings Inference" \ + "mega-faqgen" \ + "faqgen-backend-server" \ + '{"messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' +} + +function validate_frontend() { + cd $WORKPATH/ui/svelte + local conda_env_name="OPEA_e2e" + export PATH=${HOME}/miniforge3/bin/:$PATH + if conda info --envs | grep -q "$conda_env_name"; then + echo "$conda_env_name exist!" + else + conda create -n ${conda_env_name} python=3.12 -y + fi + source activate ${conda_env_name} + + sed -i "s/localhost/$ip_address/g" playwright.config.ts + + conda install -c conda-forge nodejs -y + npm install && npm ci && npx playwright install --with-deps + node -v && npm -v && pip list + + exit_status=0 + npx playwright test || exit_status=$? + + if [ $exit_status -ne 0 ]; then + echo "[TEST INFO]: ---------frontend test failed---------" + exit $exit_status + else + echo "[TEST INFO]: ---------frontend test passed---------" + fi +} + +function stop_docker() { + cd $WORKPATH/docker_compose/amd/gpu/rocm + docker compose stop && docker compose rm -f +} + +function main() { + + stop_docker + + if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi + start_services + + validate_microservices + validate_megaservice + validate_frontend + + stop_docker + echo y | docker system prune + +} + +main From c2b21ef794f1634b23a1591e82d67acf0d23627e Mon Sep 17 00:00:00 2001 From: Artem Astafev Date: Fri, 8 Nov 2024 11:46:31 +0700 Subject: [PATCH 2/5] Update FaqGen/docker_compose/amd/gpu/rocm/README.md Co-authored-by: Eero Tamminen Signed-off-by: astafevav --- FaqGen/docker_compose/amd/gpu/rocm/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/FaqGen/docker_compose/amd/gpu/rocm/README.md b/FaqGen/docker_compose/amd/gpu/rocm/README.md index f28ba9589..4fb99deb9 100644 --- a/FaqGen/docker_compose/amd/gpu/rocm/README.md +++ b/FaqGen/docker_compose/amd/gpu/rocm/README.md @@ -40,7 +40,7 @@ Example for set isolation for 2 GPUs - /dev/dri/renderD129:/dev/dri/renderD129 ``` -Pelase find more information about accessing and restricting AMD GPUs in the link (https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html#docker-restrict-gpus) +Please find more information about accessing and restricting AMD GPUs in the link (https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html#docker-restrict-gpus) ### Start Microservice Docker Containers From 06aca48003a6ad6969085b76a71f6cc7627110ef Mon Sep 17 00:00:00 2001 From: Artem Astafev Date: Fri, 8 Nov 2024 11:47:00 +0700 Subject: [PATCH 3/5] Update FaqGen/docker_compose/amd/gpu/rocm/README.md Co-authored-by: Eero Tamminen Signed-off-by: astafevav --- FaqGen/docker_compose/amd/gpu/rocm/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/FaqGen/docker_compose/amd/gpu/rocm/README.md b/FaqGen/docker_compose/amd/gpu/rocm/README.md index 4fb99deb9..bbabeb43a 100644 --- a/FaqGen/docker_compose/amd/gpu/rocm/README.md +++ b/FaqGen/docker_compose/amd/gpu/rocm/README.md @@ -2,9 +2,9 @@ ### Required Models -We set default model as "meta-llama/Meta-Llama-3-8B-Instruct", change "LLM_MODEL_ID" in following Environment Variables setting if you want to use other models. +Default model is "meta-llama/Meta-Llama-3-8B-Instruct". Change "LLM_MODEL_ID" in environment variables below if you want to use another model. -If use gated models, you also need to provide [huggingface token](https://huggingface.co/docs/hub/security-tokens) to "HUGGINGFACEHUB_API_TOKEN" environment variable. +For gated models, you also need to provide [HuggingFace token](https://huggingface.co/docs/hub/security-tokens) in "HUGGINGFACEHUB_API_TOKEN" environment variable. ### Setup Environment Variables From 03564bc8c0fcee526ccf91224f4d304a8bf75570 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 04:47:21 +0000 Subject: [PATCH 4/5] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- FaqGen/docker_compose/amd/gpu/rocm/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/FaqGen/docker_compose/amd/gpu/rocm/README.md b/FaqGen/docker_compose/amd/gpu/rocm/README.md index bbabeb43a..ae06d4c09 100644 --- a/FaqGen/docker_compose/amd/gpu/rocm/README.md +++ b/FaqGen/docker_compose/amd/gpu/rocm/README.md @@ -2,7 +2,7 @@ ### Required Models -Default model is "meta-llama/Meta-Llama-3-8B-Instruct". Change "LLM_MODEL_ID" in environment variables below if you want to use another model. +Default model is "meta-llama/Meta-Llama-3-8B-Instruct". Change "LLM_MODEL_ID" in environment variables below if you want to use another model. For gated models, you also need to provide [HuggingFace token](https://huggingface.co/docs/hub/security-tokens) in "HUGGINGFACEHUB_API_TOKEN" environment variable. From 3001aa502ea839ce26505ac628a16f3613c47b46 Mon Sep 17 00:00:00 2001 From: astafevav Date: Tue, 12 Nov 2024 17:51:13 +0700 Subject: [PATCH 5/5] Fix test script --- FaqGen/tests/test_compose_on_rocm.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/FaqGen/tests/test_compose_on_rocm.sh b/FaqGen/tests/test_compose_on_rocm.sh index 82d57b089..9e0239b27 100644 --- a/FaqGen/tests/test_compose_on_rocm.sh +++ b/FaqGen/tests/test_compose_on_rocm.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (C) 2024 Intel Corporation +# Copyright (C) 2024 Advanced Micro Devices, Inc. # SPDX-License-Identifier: Apache-2.0 set -xe @@ -48,7 +48,7 @@ function start_services() { n=0 until [[ "$n" -ge 100 ]]; do - docker logs tgi-rocm-server > ${LOG_PATH}/tgi_service_start.log + docker logs faggen-tgi-service > ${LOG_PATH}/tgi_service_start.log if grep -q Connected ${LOG_PATH}/tgi_service_start.log; then break fi @@ -157,7 +157,7 @@ function main() { validate_microservices validate_megaservice - validate_frontend +# validate_frontend stop_docker echo y | docker system prune