From 00b526c8e5a0f0663fa4c7754ac7ca7377c41284 Mon Sep 17 00:00:00 2001
From: Omar Khleif
Date: Wed, 11 Dec 2024 11:04:10 -0800
Subject: [PATCH] Changed Default UI to Gradio (#1246)

Signed-off-by: okhleif-IL
---
 DocSum/docker_compose/intel/cpu/xeon/README.md     | 12 ++++++------
 DocSum/docker_compose/intel/cpu/xeon/compose.yaml  |  4 ++--
 DocSum/docker_compose/intel/hpu/gaudi/README.md    | 12 ++++++------
 DocSum/docker_compose/intel/hpu/gaudi/compose.yaml |  4 ++--
 DocSum/tests/test_compose_on_gaudi.sh              |  2 +-
 DocSum/tests/test_compose_on_xeon.sh               |  2 +-
 6 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/DocSum/docker_compose/intel/cpu/xeon/README.md b/DocSum/docker_compose/intel/cpu/xeon/README.md
index 82cbcf841..212f5693d 100644
--- a/DocSum/docker_compose/intel/cpu/xeon/README.md
+++ b/DocSum/docker_compose/intel/cpu/xeon/README.md
@@ -67,22 +67,22 @@ docker build -t opea/docsum:latest --build-arg https_proxy=$https_proxy --build-
 
 Several UI options are provided. If you need to work with multimedia documents, .doc, or .pdf files, suggested to use Gradio UI.
 
-#### Svelte UI
+#### Gradio UI
 
-Build the frontend Docker image via below command:
+Build the Gradio UI frontend Docker image using the following command:
 
 ```bash
 cd GenAIExamples/DocSum/ui
-docker build -t opea/docsum-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .
+docker build -t opea/docsum-gradio-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile.gradio .
 ```
 
-#### Gradio UI
+#### Svelte UI
 
-Build the Gradio UI frontend Docker image using the following command:
+Build the frontend Docker image via below command:
 
 ```bash
 cd GenAIExamples/DocSum/ui
-docker build -t opea/docsum-gradio-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile.gradio .
+docker build -t opea/docsum-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .
 ```
 
 #### React UI
diff --git a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
index 72332a901..170cdc79b 100644
--- a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
+++ b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
@@ -95,8 +95,8 @@ services:
     ipc: host
     restart: always
 
-  docsum-ui:
-    image: ${REGISTRY:-opea}/docsum-ui:${TAG:-latest}
+  docsum-gradio-ui:
+    image: ${REGISTRY:-opea}/docsum-gradio-ui:${TAG:-latest}
     container_name: docsum-xeon-ui-server
     depends_on:
       - docsum-xeon-backend-server
diff --git a/DocSum/docker_compose/intel/hpu/gaudi/README.md b/DocSum/docker_compose/intel/hpu/gaudi/README.md
index 172f24d67..f9f3d6af5 100644
--- a/DocSum/docker_compose/intel/hpu/gaudi/README.md
+++ b/DocSum/docker_compose/intel/hpu/gaudi/README.md
@@ -51,22 +51,22 @@ docker build -t opea/docsum:latest --build-arg https_proxy=$https_proxy --build-
 
 Several UI options are provided. If you need to work with multimedia documents, .doc, or .pdf files, suggested to use Gradio UI.
 
-#### Svelte UI
+#### Gradio UI
 
-Build the frontend Docker image via below command:
+Build the Gradio UI frontend Docker image using the following command:
 
 ```bash
 cd GenAIExamples/DocSum/ui
-docker build -t opea/docsum-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .
+docker build -t opea/docsum-gradio-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile.gradio .
 ```
 
-#### Gradio UI
+#### Svelte UI
 
-Build the Gradio UI frontend Docker image using the following command:
+Build the frontend Docker image via below command:
 
 ```bash
 cd GenAIExamples/DocSum/ui
-docker build -t opea/docsum-gradio-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile.gradio .
+docker build -t opea/docsum-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f docker/Dockerfile .
 ```
 
 #### React UI
diff --git a/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml b/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml
index 39bb3d477..2e8211fdf 100644
--- a/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml
+++ b/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -108,8 +108,8 @@ services:
     ipc: host
     restart: always
 
-  docsum-ui:
-    image: ${REGISTRY:-opea}/docsum-ui:${TAG:-latest}
+  docsum-gradio-ui:
+    image: ${REGISTRY:-opea}/docsum-gradio-ui:${TAG:-latest}
     container_name: docsum-gaudi-ui-server
     depends_on:
       - docsum-gaudi-backend-server
diff --git a/DocSum/tests/test_compose_on_gaudi.sh b/DocSum/tests/test_compose_on_gaudi.sh
index 7b9ff4926..b26bb7eab 100644
--- a/DocSum/tests/test_compose_on_gaudi.sh
+++ b/DocSum/tests/test_compose_on_gaudi.sh
@@ -46,7 +46,7 @@ function build_docker_images() {
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="docsum docsum-ui whisper dataprep-multimedia2text dataprep-audio2text dataprep-video2audio llm-docsum-tgi"
+    service_list="docsum docsum-gradio-ui whisper dataprep-multimedia2text dataprep-audio2text dataprep-video2audio llm-docsum-tgi"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
 
     docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6
diff --git a/DocSum/tests/test_compose_on_xeon.sh b/DocSum/tests/test_compose_on_xeon.sh
index 60da44a7e..2b32f4438 100644
--- a/DocSum/tests/test_compose_on_xeon.sh
+++ b/DocSum/tests/test_compose_on_xeon.sh
@@ -45,7 +45,7 @@ function build_docker_images() {
    git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
 
    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="docsum docsum-ui whisper dataprep-multimedia2text dataprep-audio2text dataprep-video2audio llm-docsum-tgi"
+    service_list="docsum docsum-gradio-ui whisper dataprep-multimedia2text dataprep-audio2text dataprep-video2audio llm-docsum-tgi"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
 
    docker pull ghcr.io/huggingface/text-generation-inference:1.4
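
Note: the sketch below is a minimal, hypothetical smoke test of the renamed service once this patch is applied. It reuses only paths, image names, and service names that appear in the diff above, and it assumes the proxy variables and the optional `REGISTRY`/`TAG` overrides referenced by the compose file are already exported per the DocSum README.

```bash
# Build the Gradio UI image under the new name referenced by the patched compose files
cd GenAIExamples/DocSum/ui
docker build -t opea/docsum-gradio-ui:latest \
  --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy \
  -f docker/Dockerfile.gradio .

# Bring up the Xeon deployment; the UI service is now named docsum-gradio-ui
cd ../docker_compose/intel/cpu/xeon
docker compose up -d
docker compose ps docsum-gradio-ui   # expect the docsum-xeon-ui-server container to be running
```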