diff --git a/.github/workflows/check-online-doc-build.yml b/.github/workflows/check-online-doc-build.yml
new file mode 100644
index 00000000..f4c21fbe
--- /dev/null
+++ b/.github/workflows/check-online-doc-build.yml
@@ -0,0 +1,21 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+name: Check Online Document Building
+permissions: {}
+
+on:
+ pull_request:
+ branches: [main]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Build Online Document
+ shell: bash
+ run: |
+ git config --local --get remote.origin.url
+ echo "build online doc"
+ bash scripts/build.sh
diff --git a/.github/workflows/pr-path-detection.yml b/.github/workflows/pr-path-detection.yml
index f2430bce..aceb81d6 100644
--- a/.github/workflows/pr-path-detection.yml
+++ b/.github/workflows/pr-path-detection.yml
@@ -104,7 +104,7 @@ jobs:
link_head="https://github.com/opea-project/docs/blob/main"
merged_commit=$(git log -1 --format='%H')
changed_files="$(git diff --name-status --diff-filter=ARM ${{ github.event.pull_request.base.sha }} ${merged_commit} | awk '/\.md$/ {print $NF}')"
- png_lines=$(grep -Eo '\]\([^)]+\)' --include='*.md' -r .|grep -Ev 'http'|grep -Ev 'mailto')
+ png_lines=$(grep -Eo '\]\([^)]+\)' --include='*.md' -r .|grep -Ev 'http'|grep -Ev 'mailto'|grep -Ev 'portal.azure.com')
if [ -n "$png_lines" ]; then
for png_line in $png_lines; do
refer_path=$(echo "$png_line"|cut -d':' -f1 | cut -d'/' -f2-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..2a3bfc2c
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+# Contributing to OPEA
+
+Welcome to the OPEA open-source community! We are thrilled to have you here and excited about the potential contributions you can bring to the OPEA platform. Whether you are fixing bugs, adding new GenAI components, improving documentation, or sharing your unique use cases, your contributions are invaluable.
+
+Together, we can make OPEA the go-to platform for enterprise AI solutions. Let's work together to push the boundaries of what's possible and create a future where AI is accessible, efficient, and impactful for everyone.
+
+Please check the [Contributing guidelines](https://github.com/opea-project/docs/tree/main/community/CONTRIBUTING.md) for a detailed guide on how to contribute a GenAI component and all the ways you can contribute!
+
+Thank you for being a part of this journey. We can't wait to see what we can achieve together!
+
+## Additional Content
+
+- [Code of Conduct](https://github.com/opea-project/docs/tree/main/community/CODE_OF_CONDUCT.md)
+- [Security Policy](https://github.com/opea-project/docs/tree/main/community/SECURITY.md)
diff --git a/community/rfcs/24-08-02-OPEA-AIAvatarChatbot.md b/community/rfcs/24-08-02-OPEA-AIAvatarChatbot.md
index 09dd97f7..fbfd8c5f 100644
--- a/community/rfcs/24-08-02-OPEA-AIAvatarChatbot.md
+++ b/community/rfcs/24-08-02-OPEA-AIAvatarChatbot.md
@@ -1,7 +1,14 @@
# 24-08-02-OPEA-AIAvatarChatbot
-A RAG-Powered Human-Like AI Avatar Audio Chatbot integrated with OPEA AudioQnA
-
+A Human-Like AI Avatar Audio Chatbot integrated with OPEA AudioQnA
+
+Code contributions:
+* "animation" component: https://github.com/opea-project/GenAIComps/tree/main/comps/animation/wav2lip
+* "AvatarChatbot" example: https://github.com/opea-project/GenAIExamples/tree/main/AvatarChatbot
+
+Intel Developer Zone article, "Create an AI Avatar Talking Bot with PyTorch* and Open Platform for Enterprise AI (OPEA)": https://www.intel.com/content/www/us/en/developer/articles/technical/ai-avatar-talking-bot-with-pytorch-and-opea.html
+
+YouTube tech talk video: https://youtu.be/OjaElyUB8Z0?si=6-IdxwTg0YFMraFl
+
## Author
@@ -9,9 +16,9 @@ A RAG-Powered Human-Like AI Avatar Audio Chatbot integrated with OPEA AudioQnA
## Status
-v0.1 - ASMO Team sharing on Fri 6/28/2024
-[GenAIComps pr #400](https://github.com/opea-project/GenAIComps/pull/400) (Under Review)
-[GenAIExamples pr #523](https://github.com/opea-project/GenAIExamples/pull/523) (Under Review)
+v0.1 - ASMO Team sharing on Thursday 10/24/2024
+* [GenAIComps PR #775](https://github.com/opea-project/GenAIComps/pull/775) | Merged
+* [GenAIExamples PR #923](https://github.com/opea-project/GenAIExamples/pull/923) | Merged
## Objective
@@ -39,10 +46,10 @@ The chatbot will:
* Use multimodal retrieval-augmented generation (RAG) to generate more accurate, in-domain responses, in v0.2
New microservices include:
-* animation
+* [animation](https://github.com/opea-project/GenAIComps/tree/main/comps/animation/wav2lip)
New megaservices include:
-* AvatarChatbot
+* [AvatarChatbot](https://github.com/opea-project/GenAIExamples/tree/main/AvatarChatbot)
## Motivation
@@ -60,9 +67,9 @@ Related works include [Nvidia Audio2Face](https://docs.nvidia.com/ace/latest/mod
### Avatar Chatbot design
-![avatar chatbot design](assets/design.png)
+![avatar chatbot design](assets/avatar_design.png)
-Currently, the RAG feature using the `embedding` and `dataprep` microservices is missing in the above design, including uploading relevant documents/weblinks, storing them in the database, and retrieving them for the LLM model. These features will be added in v0.2.
+Currently, the above design is missing the RAG feature built on the `embedding`, `retrieval`, `reranking`, and `dataprep` microservices and a VectorDB: uploading relevant documents/weblinks, storing them in the database, and retrieving them for the LLM model. These features will be added in v0.2.
Flowchart: AvatarChatbot Megaservice
@@ -217,13 +224,14 @@ End-to-End Inference Time for AvatarChatbot Megaservice (asr -> llm -> tts -> an
On SPR:
~30 seconds for AudioQnA on SPR,
-~40-200 seconds for AvatarAnimation on SPR
+~30-200 seconds for AvatarAnimation on SPR
On Gaudi 2:
~5 seconds for AudioQnA on Gaudi,
-~10-50 seconds for AvatarAnimation on Gaudi, depending on:
+~10-40 seconds for AvatarAnimation on Gaudi, depending on:
1) Whether the input is an image or a multi-frame, fixed-fps video
-1) LipSync Animation DL model used: Wav2Lip_only or Wav2Lip+GFPGAN or SadTalker
-2) Resolution and FPS rate of the resulting mp4 video
+2) The `max_tokens` parameter used in LLM text generation
+3) LipSync Animation DL model used: Wav2Lip_only or Wav2Lip+GFPGAN or SadTalker
+4) Resolution and FPS rate of the resulting mp4 video
-All latency reportings are as of 8/2/2024.
+All reported latencies are as of 10/24/2024.
diff --git a/community/rfcs/assets/avatar_design.png b/community/rfcs/assets/avatar_design.png
new file mode 100644
index 00000000..67c3c135
Binary files /dev/null and b/community/rfcs/assets/avatar_design.png differ
diff --git a/community/rfcs/assets/design.png b/community/rfcs/assets/design.png
deleted file mode 100644
index 1368137d..00000000
Binary files a/community/rfcs/assets/design.png and /dev/null differ
diff --git a/conf.py b/conf.py
index d80915e2..35051272 100644
--- a/conf.py
+++ b/conf.py
@@ -4,6 +4,9 @@
import os
import sys
from datetime import datetime
+import glob
+import shutil
+
sys.path.insert(0, os.path.abspath('.'))
@@ -84,7 +87,7 @@
# Toc options
'collapse_navigation': False,
'sticky_navigation': True,
- 'navigation_depth': 3,
+ 'navigation_depth': 4,
}
@@ -114,6 +117,7 @@
)
}
+show_warning_types = True
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -130,10 +134,31 @@
# paths that contain custom static files (such as style sheets)
html_static_path = ['sphinx/_static']
+def copy_images(src, dst):
+    # Mirror png/svg files from the source tree into the destination tree,
+    # preserving their relative paths.
+    image_types = ["png", "svg"]
+    for image_type in image_types:
+        pattern = "{}/**/*.{}".format(src, image_type)
+        files = glob.glob(pattern, recursive=True)
+        for file in files:
+            sub_name = file.replace(src, '')
+            dst_filename = "{}{}".format(dst, sub_name)
+            folder = os.path.dirname(dst_filename)
+            if not os.path.exists(folder):
+                os.makedirs(folder)
+            shutil.copy(file, dst_filename)
+
+def copy_image_to_html(app, exception):
+    # 'build-finished' handler: copy png/svg files from the source tree into
+    # the HTML output directory after the build completes.
+    if app.builder.name == 'html':
+        if os.path.exists(app.srcdir) and os.path.exists(app.outdir):
+            copy_images(str(app.srcdir), str(app.outdir))
+        else:
+            print("Source dir {} or output dir {} does not exist".format(app.srcdir, app.outdir))
+
def setup(app):
app.add_css_file("opea-custom.css")
app.add_js_file("opea-custom.js")
+ app.connect('build-finished', copy_image_to_html)
# Disable "Created using Sphinx" in the HTML footer. Default is True.
html_show_sphinx = False
diff --git a/examples/examples.rst b/examples/examples.rst
new file mode 100644
index 00000000..a86a36e1
--- /dev/null
+++ b/examples/examples.rst
@@ -0,0 +1,15 @@
+Examples
+##########
+
+.. rst-class:: rst-columns
+
+.. contents::
+ :local:
+ :depth: 1
+
+----
+
+.. comment This include file is generated in the Makefile during doc build
+ time from all the directories found in the GenAIExamples top level directory
+
+.. include:: examples.txt
\ No newline at end of file
diff --git a/examples/index.rst b/examples/index.rst
index c0d192a0..6524dc1d 100644
--- a/examples/index.rst
+++ b/examples/index.rst
@@ -25,19 +25,6 @@ We're building this documentation from content in the
:glob:
/GenAIExamples/README
+ examples.rst
/GenAIExamples/*
-**Example Applications Table of Contents**
-
-.. rst-class:: rst-columns
-
-.. contents::
- :local:
- :depth: 1
-
-----
-
-.. comment This include file is generated in the Makefile during doc build
- time from all the directories found in the GenAIExamples top level directory
-
-.. include:: examples.txt
diff --git a/getting-started/README.md b/getting-started/README.md
index f3bab89f..a8c80ee4 100644
--- a/getting-started/README.md
+++ b/getting-started/README.md
@@ -1,110 +1,233 @@
# Getting Started with OPEA
+This document provides a tailored guide to deploying the ChatQnA application from OPEA GenAI Examples on several cloud platforms, including Amazon Web Services (AWS), Google Cloud Platform (GCP), IBM Cloud, Microsoft Azure, and Oracle Cloud Infrastructure, so you can choose the best fit for your needs. For additional deployment targets, see the [ChatQnA Sample Guide](https://opea-project.github.io/latest/examples/ChatQnA/ChatQnA_Guide.html).
+
+## Understanding OPEA's Core Components
+
+Before moving forward, it's important to familiarize yourself with two key elements of OPEA: GenAIComps and GenAIExamples.
+
+- GenAIComps is a collection of microservice components that form a service-based toolkit. This includes a variety of services such as llm (large language models), embedding, and reranking, among others.
+- GenAIExamples provides practical and deployable solutions to help users implement these services effectively. Examples include ChatQnA and DocSum, which leverage the microservices for specific applications.
+
## Prerequisites
-To get started with OPEA you need the right hardware and basic software setup.
+To get started, you need a virtual server running Ubuntu on your cloud platform of choice, with HTTP (port 80) access enabled and Docker installed. The sections below walk through these steps for each cloud provider.
+
+## Create and Configure a Virtual Server
-- Hardware Requirements: For the hardware configuration, If you need Hardware Access visit the [Intel Tiber Developer Cloud](https://cloud.intel.com) to select from options such as Xeon or Gaudi processors that meet the necessary specifications.
+::::{tab-set}
+:::{tab-item} Amazon Web Services
+:sync: AWS
-- Software Requirements: Refer to the [Support Matrix](https://github.com/opea-project/GenAIExamples/blob/main/README.md#getting-started) to ensure you have the required software components in place.
+1. Navigate to the [AWS console](https://console.aws.amazon.com/console/home). Search for EC2 in the search bar and select it, then click the orange "Launch Instance" button.
-Note : If you are deploying it on cloud, say AWS, select a VM instance from R7iz or m7i family of instances with base OS as Ubuntu 22.04 (AWS ami id : ami-05134c8ef96964280). Use the command below to install docker on a clean machine.
-```
-wget https://raw.githubusercontent.com/opea-project/GenAIExamples/refs/heads/main/ChatQnA/docker_compose/install_docker.sh
-chmod +x install_docker.sh
-./install_docker.sh
-```
-## Understanding OPEA's Core Components
+2. Provide a name to the VM.
-Before moving forward, it's important to familiarize yourself with two key elements of OPEA: GenAIComps and GenAIExamples.
+3. In Quick Start, select Ubuntu as the base OS (AMI ID: `ami-04dd23e62ed049936`).
+
+4. Select an Instance type that is based on Intel hardware.
+
+>**Note**: We recommend selecting an `m7i.4xlarge` or larger instance with an Intel(R) 4th Gen Xeon(R) Scalable Processor. For more information on Intel-based virtual servers on AWS, visit [AWS and Intel](https://aws.amazon.com/intel/).
+
+5. Next, create a new key pair and give it a name, or select one of the existing key pairs.
+
+6. Under Network settings, select an existing security group. If there is none, create a new one by selecting the "Create security group" radio button, and check the "Allow SSH traffic" and "Allow HTTP traffic" boxes.
+
+7. Configure the storage to 100 GiB and click "Launch Instance".
+
+8. Click on the "connect" button on the top right and connect using your preferred method.
+
+9. Look up Security Groups in the search bar and select the security group used when creating the instance.
+
+10. Click on the Edit inbound rules on the right side of the window.
+
+11. Select "Add rule" at the bottom and create a rule with type Custom TCP, port range 80, and source 0.0.0.0/0. Learn more about [editing inbound/outbound rules](https://docs.aws.amazon.com/finspace/latest/userguide/step5-config-inbound-rule.html), or add the rule from the AWS CLI as sketched below.
+
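+If you prefer the command line, the same ingress rule can be added with the AWS CLI (the security group ID is a placeholder; substitute your own):
+
+```bash
+# Allow inbound HTTP (port 80) from anywhere on the instance's security group
+aws ec2 authorize-security-group-ingress \
+  --group-id sg-0123456789abcdef0 \
+  --protocol tcp \
+  --port 80 \
+  --cidr 0.0.0.0/0
+```
+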
+:::
+:::{tab-item} Google Cloud Platform
+:sync: GCP
+
+1. Navigate to [GCP console](https://console.cloud.google.com/) – Click the "Create a VM" button.
+
+2. Provide a name to the VM.
+
+3. Select the base OS as `Ubuntu 24.04 LTS` from the Marketplace.
+
+4. Select an Instance type that is based on Intel hardware.
+
+> **Note:** We recommend selecting a `c4-standard-32` or larger instance with an Intel(R) 4th Gen Xeon(R) Scalable Processor; the minimum supported C3 instance type is `c3-standard-8` with 32 GB of memory. For more information, visit [virtual servers on GCP](https://cloud.google.com/intel). An equivalent `gcloud` command is sketched after these steps.
+
+5. Under Firewall settings, select “Allow HTTP traffic” so you can access the ChatQnA UI web portal.
+
+6. Change the Boot disk to 100 GiB and click "Create".
+
+7. Use any preferred SSH method such as "Open in browser window" to connect to the instance.
+
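+The sketch below is a rough `gcloud` equivalent of the steps above. The instance name, zone, and image family are assumptions; verify the Ubuntu 24.04 image family with `gcloud compute images list --filter=ubuntu` before running it:
+
+```bash
+# Create a C4 instance running Ubuntu 24.04 with a 100 GB boot disk,
+# tagged so the default "allow HTTP" firewall rule applies to it.
+gcloud compute instances create opea-chatqna-vm \
+  --zone=us-central1-a \
+  --machine-type=c4-standard-32 \
+  --image-family=ubuntu-2404-lts-amd64 \
+  --image-project=ubuntu-os-cloud \
+  --boot-disk-size=100GB \
+  --tags=http-server
+```
+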
+:::
+:::{tab-item} IBM Cloud
+:sync: IBM Cloud
+
+1. Navigate to [IBM Cloud](https://cloud.ibm.com). Click the "Create resource" button at the top right of the screen, select "Compute" from the options available, and then select "Virtual Server for VPC".
+
+2. Select a location for the instance. Assign a name to it.
+
+3. Under Stock Images, select Ubuntu 24.04 (`ibm-ubuntu-24-04-6-minimal-amd64-1`)
+
+4. Select a virtual server.
+
+> **Note:** We recommend selecting a 3-series instance with an Intel(R) 4th Gen Xeon(R) Scalable Processor, such as `bx3d-16x80` or above. For more information on virtual servers on IBM Cloud, visit [Intel® solutions on IBM Cloud®](https://www.ibm.com/cloud/intel).
+
+5. Add an SSH key to the instance; if necessary, create one first.
+
+6. Click on "Create virtual server".
+
+7. Once the instance is running, create and attach a "Floating IP" to it. For more information, visit [working with floating IPs](https://cloud.ibm.com/docs/vpc?topic=vpc-fip-working&interface=ui).
+
+8. Under "Infrastructure" in the left pane, go to Network/Security groups//Rules
+
+9. Select "Create"
+
+10. Enable inbound traffic for port 80. For more information on editing inbound/outbound rules, see [updating the default security group](https://cloud.ibm.com/docs/vpc?topic=vpc-updating-the-default-security-group&interface=ui).
+
+11. SSH into the instance using the floating IP (`ssh -i <path-to-private-key> ubuntu@<floating-ip>`).
+:::
+:::{tab-item} Microsoft Azure
+:sync: Azure
+
+1. Navigate to the [Microsoft Azure portal](https://portal.azure.com). Select the "Skip" button on the bottom right to land on the service offerings page. Search for "Virtual Machines" in the search bar and select it. Click the "Create" button and select "Azure Virtual Machine".
+
+2. Select an existing "Resource group" from the drop-down, or click "Create" for a new resource group and give it a name. If you have issues, refer to [cannot create resource groups](https://learn.microsoft.com/en-us/answers/questions/1520133/cannot-create-resource-groups).
+
+3. Provide a name to the VM and select the base OS as `Ubuntu 24.04 LTS`.
-- GenAIComps is a collection of microservice components that form a service-based toolkit. This includes a variety of services such as llm (language learning models), embedding, and reranking, among others.
-- While GenAIComps offers a range of microservices, GenAIExamples provides practical, deployable solutions to help users implement these services effectively. Examples include ChatQnA and DocSum, which leverage the microservices for specific applications.
+4. Select x64 in VM architecture.
-## Visual Guide to Deployment
-To illustrate, here's a simplified visual guide on deploying a ChatQnA GenAIExample, showcasing how you can set up this solution in just a few steps.
+5. Select an Instance type that is based on Intel hardware.
-![Getting started with OPEA](assets/getting_started.gif)
+>**Note**: We recommend selecting a `Standard_D16ds_v5` or larger instance with an Intel(R) 3rd/4th Gen Xeon(R) Scalable Processor. You can find this family of instances in the (US) West US region. For more information, visit [virtual machines on Azure](https://azure.microsoft.com/en-us/partners/directory/intel-corporation).
-## Setup ChatQnA Parameters
-To deploy ChatQnA services, follow these steps:
+6. Select Password as the Authentication type and create a username and password for your instance.
+
+7. Under the Inbound port rules section, choose "Allow selected ports" and select HTTP.
+
+8. Click "Next: Disk" button and select OS disk size as 128GiB.
+
+9. Click on "Review + Create" to launch the VM.
+
+10. Click Go to resource -> Connect -> Connect -> SSH using Azure CLI. Accept the terms and then select "Configure + connect"
+
+>**Note**: If you have issues connecting to the instance with SSH, you can use Bastion instead, with your username and password. If you prefer the command line, an Azure CLI equivalent of the setup above is sketched below.
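+
+A rough Azure CLI sketch, assuming placeholder resource names and that the Ubuntu 24.04 image URN is `Canonical:ubuntu-24_04-lts:server:latest` (verify it with `az vm image list --publisher Canonical --all --output table`):
+
+```bash
+# Create a resource group and an Intel-based VM with password authentication
+az group create --name opea-rg --location westus
+az vm create \
+  --resource-group opea-rg \
+  --name opea-chatqna-vm \
+  --image Canonical:ubuntu-24_04-lts:server:latest \
+  --size Standard_D16ds_v5 \
+  --os-disk-size-gb 128 \
+  --admin-username azureuser \
+  --authentication-type password \
+  --admin-password '<your-password>'
+# Open port 80 so the ChatQnA UI is reachable over HTTP
+az vm open-port --resource-group opea-rg --name opea-chatqna-vm --port 80
+```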
+:::
+:::{tab-item} Oracle Cloud Infrastructure
+:sync: OCI
+
+1. Log in to the [Oracle Cloud Console](https://www.oracle.com/cloud/sign-in.html?redirect_uri=https%3A%2F%2Fcloud.oracle.com%2F), then navigate to [Compute Instances](https://cloud.oracle.com/compute/instances) and click the "Create Instance" button.
+
+2. Provide a name to the VM and select the placement in the availability domains.
+
+3. In the Image and Shape section, click "Change Image" > "Ubuntu", then select `Canonical Ubuntu 24.04`. Submit using the "Select Image" button at the bottom.
+
+4. Click the "Change Shape" > "Bare Metal Machine" then select the `BM.Standard3.64`. Submit using the "Select Shape" button at the bottom.
+
+5. Select the VCN and the public subnet that the server should reside in. If a new VCN/subnet is needed, select "Create new virtual cloud network" and "Create new public subnet" to create a subnet that is exposed to the internet.
+
+6. Next, generate and save a new private key, or upload an existing public key.
+
+7. Specify a boot volume size of 100 GiB with a performance setting of 30 VPUs.
+
+8. Click Create to launch the instance.
+
+9. Note the public IP address of the machine once it's launched.
+
+10. Once the instance is launched, click on the subnet in the Primary VNIC section. Then click on "Default Security List for vcn-xxxxxxxx-xxxx" and click "Add Ingress Rules". Add the following information:
+    - Source CIDR: 0.0.0.0/0
+    - Source Port Range: All
+    - Destination Port Range: 80
+
+    Then click "Save".
+
+11. Connect using SSH (`ssh -i <path-to-private-key> ubuntu@<public-ip>`).
+
+:::
+::::
+
+## Deploy the ChatQnA Solution
+
+Use the commands below to install Docker:
+```bash
+wget https://raw.githubusercontent.com/opea-project/GenAIExamples/refs/heads/main/ChatQnA/docker_compose/install_docker.sh
+chmod +x install_docker.sh
+./install_docker.sh
```
+Configure Docker to run as a non-root user by following these [instructions](https://docs.docker.com/engine/install/linux-postinstall/); the typical steps are sketched below.
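+
+A minimal sketch of those post-install steps, taken from the linked Docker guide (log out and back in for the group change to take effect if you skip `newgrp`):
+
+```bash
+# Create the docker group (it may already exist) and add the current user to it
+sudo groupadd docker
+sudo usermod -aG docker $USER
+# Apply the new group membership in the current shell
+newgrp docker
+```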
+
+Clone the repo and navigate to the ChatQnA example:
+
+```bash
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples/ChatQnA
```
-### Set the required environment variables:
-```
-# Example: host_ip="192.168.1.1"
-export host_ip="External_Public_IP"
-# Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
-export no_proxy="Your_No_Proxy"
+Set the required environment variables:
+```bash
+export host_ip="localhost"
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
```
-If you are in a proxy environment, also set the proxy-related environment variables:
+
+Set the remaining use-case-specific environment variables:
+```bash
+cd docker_compose/intel/cpu/xeon/
+source set_env.sh
```
-export http_proxy="Your_HTTP_Proxy"
-export https_proxy="Your_HTTPs_Proxy"
+Now we can start the services:
+```bash
+docker compose up -d
```
+>**Note**: It takes a few minutes for the services to start. Check the logs for the services to ensure that ChatQnA is running before proceeding further.
-Set up other specific use-case environment variables by choosing one of these options, according to your hardware:
+For example, to check the logs for the `tgi-service`:
+```bash
+docker logs tgi-service | grep Connected
```
-# on Xeon
-source ./docker_compose/intel/cpu/xeon/set_env.sh
-# on Gaudi
-source ./docker_compose/intel/hpu/gaudi/set_env.sh
-# on Nvidia GPU
-source ./docker_compose/nvidia/gpu/set_env.sh
-```
-
-## Deploy ChatQnA Megaservice and Microservices
-Select the directory containing the `compose.yaml` file that matches your hardware.
+Proceed further **only after** the output shows `Connected`, as in the following example:
```
-#xeon
-cd docker_compose/intel/cpu/xeon/
-#gaudi
-cd docker_compose/intel/hpu/gaudi/
-#nvidia
-cd docker_compose/nvidia/gpu/
+tgi-service | 2024-10-18T22:41:18.973042Z INFO text_generation_router::server: router/src/server.rs:2311: Connected
```
-Now we can start the services
+
+Run `docker ps -a` as an additional check to verify that all the services are running as shown:
+
```
-docker compose up -d
+| CONTAINER ID | IMAGE | COMMAND | CREATED | STATUS | PORTS | NAMES |
+|--------------|------------------------------------------------------------------------|------------------------|--------------|-------------|------------------------------------------------------------------------------------------|------------------------------|
+| 3a65ff9e16bd | opea/nginx:latest | `/docker-entrypoint.\…`| 14 hours ago | Up 14 hours | 0.0.0.0:80->80/tcp, :::80->80/tcp | chatqna-xeon-nginx-server |
+| 7563b2ee1cd9 | opea/chatqna-ui:latest | `docker-entrypoint.s\…`| 14 hours ago | Up 14 hours | 0.0.0.0:5173->5173/tcp, :::5173->5173/tcp | chatqna-xeon-ui-server |
+| 9ea57a660cd6 | opea/chatqna:latest | `python chatqna.py` | 14 hours ago | Up 14 hours | 0.0.0.0:8888->8888/tcp, :::8888->8888/tcp | chatqna-xeon-backend-server |
+| 451bacaac3e6 | opea/retriever-redis:latest | `python retriever_re\…`| 14 hours ago | Up 14 hours | 0.0.0.0:7000->7000/tcp, :::7000->7000/tcp | retriever-redis-server |
+| c1f952ef5c08 | opea/dataprep-redis:latest | `python prepare_doc_\…`| 14 hours ago | Up 14 hours | 0.0.0.0:6007->6007/tcp, :::6007->6007/tcp | dataprep-redis-server |
+| 2a874ed8ce6f | redis/redis-stack:7.2.0-v9 | `/entrypoint.sh` | 14 hours ago | Up 14 hours | 0.0.0.0:6379->6379/tcp, :::6379->6379/tcp, 0.0.0.0:8001->8001/tcp, :::8001->8001/tcp | redis-vector-db |
+| ac7b62306eb8 | ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 | `text-embeddings-rou\…`| 14 hours ago | Up 14 hours | 0.0.0.0:8808->80/tcp, [::]:8808->80/tcp | tei-reranking-server |
+| 521cc7faa00e | ghcr.io/huggingface/text-generation-inference:sha-e4201f4-intel-cpu | `text-generation-lau\…`| 14 hours ago | Up 14 hours | 0.0.0.0:9009->80/tcp, [::]:9009->80/tcp | tgi-service |
+| 9faf553d3939 | ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 | `text-embeddings-rou\…`| 14 hours ago | Up 14 hours | 0.0.0.0:6006->80/tcp, [::]:6006->80/tcp | tei-embedding-server |
+
```
-It will automatically download the needed docker images from docker hub:
-- docker pull opea/chatqna:latest
-- docker pull opea/chatqna-ui:latest
+### Interact with ChatQnA
-In the following cases, you will need to build the docker image from source by yourself.
+You can interact with ChatQnA via a browser interface:
-- The docker image failed to download. (You may want to first check the
- [Docker Images](https://github.com/opea-project/GenAIExamples/blob/main/docker_images_list.md)
- list and verify that the docker image you're downloading exists on dockerhub.)
-- You want to use a different version than latest.
+* To view the ChatQnA interface, open a browser and navigate to the UI by inserting your public-facing IP address in the following: `http://{public_ip}:80`.
-Refer to the {ref}`ChatQnA Example Deployment Options ` section for building from source instructions matching your hardware.
+We can go ahead and ask a sample question, say 'What is OPEA?'.
-## Interact with ChatQnA Megaservice and Microservice
-Before interact ChatQnA Service, make sure the TGI service is ready (which takes up to 2 minutes to start).
-```
-docker ps
-# expected: all images's status are up
-# TGI example on on Xeon and Nvidia GPU
-docker logs tgi-service | grep Connected
-# TGI example on on Gaudi
-docker logs tgi-gaudi-service | grep Connected
-# execpted output: ... INFO text_generation_router::server: router/src/server.rs:2311: Connected
-```
-```
-curl http://${host_ip}:8888/v1/chatqna \
- -H "Content-Type: application/json" \
- -d '{
- "messages": "What is the revenue of Nike in 2023?"
- }'
-```
-This command will provide the response as a stream of text. You can modify the `message` parameter in the `curl` command and interact with the ChatQnA service.
+A snapshot of the interface looks as follows:
+
+![Chat Interface](assets/chat_ui_response.png)
+
+Given that information about OPEA was not in the model's training data, we see the model hallucinate a response. We can upload a document (PDF) with the relevant information and observe how the response changes.
+
+> **Note:** This example leverages the OPEA document for its RAG-based content. You can download the [OPEA document](assets/what_is_opea.pdf) and upload it using the UI.
+
+![Chat Interface with RAG](assets/chat_ui_response_rag.png)
+
+We observe that the response is relevant and is based on the PDF uploaded. See the [ChatQnA Sample Guide](https://opea-project.github.io/latest/examples/ChatQnA/ChatQnA_Guide.html)
+to learn how you can customize the example with your own content.
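+
+You can also query the ChatQnA backend service directly from the command line. A minimal sketch, assuming the backend is exposed on port 8888 as in the `docker ps` output above:
+
+```bash
+# Send a question to the ChatQnA megaservice; the answer is streamed back as text
+curl http://${host_ip}:8888/v1/chatqna \
+  -H "Content-Type: application/json" \
+  -d '{"messages": "What is OPEA?"}'
+```
+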
## What’s Next
@@ -113,26 +236,26 @@ This command will provide the response as a stream of text. You can modify the `
### Get Involved
-Have you ideas and skills to build out genAI components, microservices, and solutions? Would you like to be a part of this evolving technology in its early stages? Welcome!
-* Register for our mailing list:
- * General: https://lists.lfaidata.foundation/g/OPEA-announce
- * Technical Discussions: https://lists.lfaidata.foundation/g/OPEA-technical-discuss
+Do you have ideas and skills to build out GenAI components, microservices, and solutions? Would you like to be a part of this evolving technology in its early stages? Welcome!
+* Register for our mailing lists:
+  * [Announcements](https://lists.lfaidata.foundation/g/OPEA-announce)
+  * [Technical Discussions](https://lists.lfaidata.foundation/g/OPEA-technical-discuss)
* Subscribe to the working group mailing lists that interest you
- * End user https://lists.lfaidata.foundation/g/OPEA-End-User
- * Evaluation https://lists.lfaidata.foundation/g/OPEA-Evaluation
- * Community https://lists.lfaidata.foundation/g/OPEA-Community
- * Research https://lists.lfaidata.foundation/g/OPEA-Research
- * Security https://lists.lfaidata.foundation/g/OPEA-Security
-* Go to the Community Section of the OPEA repo for Contribution Guidelines and step by step instructions.
-* Attend any of our community events and hackathons. https://wiki.lfaidata.foundation/display/DL/OPEA+Community+Events
+ * [End user](https://lists.lfaidata.foundation/g/OPEA-End-User)
+ * [Evaluation](https://lists.lfaidata.foundation/g/OPEA-Evaluation)
+ * [Community](https://lists.lfaidata.foundation/g/OPEA-Community)
+ * [Research](https://lists.lfaidata.foundation/g/OPEA-Research)
+ * [Security](https://lists.lfaidata.foundation/g/OPEA-Security)
+* Go to the Community section of the OPEA repo for [Contribution Guidelines](https://opea-project.github.io/latest/community/CONTRIBUTING.html) and step-by-step instructions.
+* Attend any of our [community events and hackathons](https://wiki.lfaidata.foundation/display/DL/OPEA+Community+Events).
Current GenAI Examples
-- Simple chatbot that uses retrieval augmented generation (RAG) architecture. [ChatQnA](/examples/ChatQnA/ChatQnA_Guide.rst)
-- Code generation, from enabling non-programmers to generate code to improving productivity with code completion of complex applications. [CodeGen]
-- Make your applications more flexible by porting to different languages. [CodeTrans](https://opea-project.github.io/latest/GenAIExamples/CodeGen/README.html)
+- Simple chatbot that uses retrieval augmented generation (RAG) architecture. [ChatQnA](/examples/ChatQnA/ChatQnA_Guide.rst)
+- Code generation, from enabling non-programmers to generate code to improving productivity with code completion of complex applications. [CodeGen](https://opea-project.github.io/latest/GenAIExamples/CodeGen/README.html)
+- Make your applications more flexible by porting to different languages. [CodeTrans](https://opea-project.github.io/latest/GenAIExamples/CodeTrans/README.html)
- Create summaries of news articles, research papers, technical documents, etc. to streamline content systems. [DocSum](https://opea-project.github.io/latest/GenAIExamples/DocSum/README.html)
- Mimic human behavior by iteratively searching, selecting, and synthesizing information across large bodies of content. [SearchQnA](https://opea-project.github.io/latest/GenAIExamples/SearchQnA/README.html)
- Provide critical content to your customers by automatically generating Frequently Asked Questions (FAQ) resources. [FaqGen](https://opea-project.github.io/latest/GenAIExamples/FaqGen/README.html)
- Provide text descriptions from pictures, enabling your users to inquire directly about products, services, sites, etc. [VisualQnA](https://opea-project.github.io/latest/GenAIExamples/VisualQnA/README.html)
-- Reduce language barriers through customizable text translation systems. [Translation](https://opea-project.github.io/latest/GenAIExamples/Translation/README.html)
+- Reduce language barriers through customizable text translation systems. [Translation](https://opea-project.github.io/latest/GenAIExamples/Translation/README.html)
diff --git a/getting-started/assets/chat_ui_response.png b/getting-started/assets/chat_ui_response.png
new file mode 100644
index 00000000..a4472731
Binary files /dev/null and b/getting-started/assets/chat_ui_response.png differ
diff --git a/getting-started/assets/chat_ui_response_rag.png b/getting-started/assets/chat_ui_response_rag.png
new file mode 100644
index 00000000..b5e27fa9
Binary files /dev/null and b/getting-started/assets/chat_ui_response_rag.png differ
diff --git a/getting-started/assets/getting_started.gif b/getting-started/assets/getting_started.gif
deleted file mode 100644
index 557d7894..00000000
Binary files a/getting-started/assets/getting_started.gif and /dev/null differ
diff --git a/getting-started/assets/what_is_opea.pdf b/getting-started/assets/what_is_opea.pdf
new file mode 100644
index 00000000..a9e1141b
Binary files /dev/null and b/getting-started/assets/what_is_opea.pdf differ
diff --git a/index.rst b/index.rst
index 42974299..4c6b3f37 100644
--- a/index.rst
+++ b/index.rst
@@ -76,6 +76,7 @@ Source code for the OPEA Project is maintained in the
developer-guides/index
community/index
release_notes/index
+ CONTRIBUTING
faq
.. _OPEA Project GitHub repository: https://github.com/opea-project
diff --git a/scripts/build.sh b/scripts/build.sh
index 245ce56d..9cedec55 100755
--- a/scripts/build.sh
+++ b/scripts/build.sh
@@ -40,6 +40,14 @@ echo "Build HTML"
cd docs
make clean
make html
+retval=$?
+echo "make html exit code: $retval"
+if [ $retval -ne 0 ]; then
+  echo "make html failed"
+  exit 1
+else
+  echo "Done"
+fi
if [ ! -d _build/html ]; then
echo "Build online doc is wrong!"
diff --git a/scripts/filter-doc-log.sh b/scripts/filter-doc-log.sh
index 61f2417c..a7cbd90a 100755
--- a/scripts/filter-doc-log.sh
+++ b/scripts/filter-doc-log.sh
@@ -9,7 +9,7 @@
# Only argument is the name of the log file saved by the build.
KI_SCRIPT=scripts/filter-known-issues.py
-CONFIG_DIR=.known-issues/doc
+CONFIG_DIR=.known-issues/
LOG_FILE=$1
BUILDDIR=$(dirname $LOG_FILE)
@@ -32,7 +32,7 @@ else
fi
if [ -s "${LOG_FILE}" ]; then
- $KI_SCRIPT --config-dir ${CONFIG_DIR} ${LOG_FILE} > ${BUILDDIR}/doc.warnings 2>&1
+ python3 $KI_SCRIPT --config-dir ${CONFIG_DIR} ${LOG_FILE} > ${BUILDDIR}/doc.warnings 2>&1
if [ -s ${BUILDDIR}/doc.warnings ]; then
echo
echo -e "${red}New errors/warnings found, please fix them:"
@@ -41,7 +41,7 @@ if [ -s "${LOG_FILE}" ]; then
echo
cat ${BUILDDIR}/doc.warnings
echo
- exit 1
+ exit 2
else
echo -e "${green}No new errors/warnings."
$TPUT sgr0
@@ -49,5 +49,5 @@ if [ -s "${LOG_FILE}" ]; then
else
echo "Error in $0: logfile \"${LOG_FILE}\" not found."
- exit 1
+ exit 3
fi
diff --git a/scripts/filter-known-issues.py b/scripts/filter-known-issues.py
index 79f56fd1..256aa3be 100755
--- a/scripts/filter-known-issues.py
+++ b/scripts/filter-known-issues.py
@@ -113,12 +113,21 @@ def config_import(paths):
_paths = []
# Go over the list, flush it if the user gave an empty path ("")
for path in paths:
+        if path and not os.path.exists(path):
+            logging.debug("%s does not exist, skipping", path)
+            continue
+
if path == "" or path is None:
logging.debug("flushing current config list: %s", _paths)
_paths = []
else:
_paths.append(path)
logging.debug("config list: %s", _paths)
+
+    if len(_paths) == 0:
+        logging.error("No available config path")
+        assert False, "No available config path"
+
for path in _paths:
config_import_path(path)
@@ -146,8 +155,7 @@ def config_import(paths):
logging.basicConfig(level=40 - 10 * (args.verbosity - args.quiet),
format="%(levelname)s: %(message)s")
-path = ".known-issues/"
-logging.debug("Reading configuration from directory `%s`", path)
+logging.debug("Reading configuration from directory `%s`", args.config_dir)
config_import(args.config_dir)
exclude_ranges = []
diff --git a/sphinx/_static/opea-custom.js b/sphinx/_static/opea-custom.js
index ac0f4453..cb2d0221 100644
--- a/sphinx/_static/opea-custom.js
+++ b/sphinx/_static/opea-custom.js
@@ -2,7 +2,7 @@
$(document).ready(function(){
/* tweak logo link to the marketing site instead of doc site */
- $( ".icon-home" ).attr({href: "https://opea.dev/", target: "_blank"});
+ $( ".icon-home" ).attr({href: "https://opea-project.github.io/latest/index.html", target: "_blank"});
/* open external links in a new tab */
$('a[class*=external]').attr({target: '_blank', rel: 'noopener'});