Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add SQL agent strategy #975

Merged
merged 38 commits into from
Dec 11, 2024
Merged
Changes from 1 commit
Commits
Show all changes
38 commits
Select commit Hold shift + click to select a range
bd44bb6
initial code for sql agent llama
minmin-intel Dec 3, 2024
26d7964
add test for sql agent
minmin-intel Dec 4, 2024
ad4969a
update sql agent test
minmin-intel Dec 5, 2024
a6ef75d
fix bugs and use vllm to test sql agent
minmin-intel Dec 5, 2024
a63d3bc
add tag-bench test and google search tool
minmin-intel Dec 5, 2024
c1ca4b4
test sql agent with hints
minmin-intel Dec 5, 2024
cc56a6c
fix bugs for sql agent with hints and update test
minmin-intel Dec 6, 2024
d063bf9
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 6, 2024
6191bd9
Merge branch 'main' into sql-agent-v1.2
minmin-intel Dec 6, 2024
1f207b1
add readme for sql agent and fix ci bugs
minmin-intel Dec 6, 2024
eb692b1
Merge branch 'sql-agent-v1.2' of https://github.com/minmin-intel/GenA…
minmin-intel Dec 6, 2024
48ca517
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 6, 2024
66c27e2
add sql agent using openai models
minmin-intel Dec 6, 2024
fde2d72
Merge branch 'sql-agent-v1.2' of https://github.com/minmin-intel/GenA…
minmin-intel Dec 6, 2024
a8897b5
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 6, 2024
a93f0d4
fix bugs in sql agent openai
minmin-intel Dec 6, 2024
c154f84
Merge branch 'sql-agent-v1.2' of https://github.com/minmin-intel/GenA…
minmin-intel Dec 6, 2024
b789495
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 6, 2024
0644105
make wait time longer for sql agent microservice to be ready
minmin-intel Dec 9, 2024
48935b0
update readme
minmin-intel Dec 9, 2024
b86bbea
fix test bug
minmin-intel Dec 9, 2024
88c796d
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 9, 2024
7622cfd
skip planexec with vllm due to vllm-gaudi bug
minmin-intel Dec 9, 2024
5f76d82
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 9, 2024
b80ee58
debug ut issue
minmin-intel Dec 9, 2024
ac0fc86
Merge branch 'sql-agent-v1.2' of https://github.com/minmin-intel/GenA…
minmin-intel Dec 9, 2024
9d85cb3
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 10, 2024
b5c0e2a
use vllm for all uts
minmin-intel Dec 10, 2024
e02b4cd
Merge branch 'sql-agent-v1.2' of https://github.com/minmin-intel/GenA…
minmin-intel Dec 10, 2024
4b239ec
debug ci issue
minmin-intel Dec 10, 2024
3c1a220
Merge branch 'sql-agent-v1.2' of https://github.com/minmin-intel/GenA…
minmin-intel Dec 10, 2024
bb0d24d
change vllm port
minmin-intel Dec 10, 2024
f5e29d9
update ut
minmin-intel Dec 10, 2024
faeeeb6
Merge branch 'main' into sql-agent-v1.2
minmin-intel Dec 10, 2024
8dfccaa
remove tgi server
minmin-intel Dec 10, 2024
1feb779
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 10, 2024
14c61f9
align vllm port
minmin-intel Dec 11, 2024
880fe43
Merge branch 'main' into sql-agent-v1.2
minmin-intel Dec 11, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
use vllm for all uts
Signed-off-by: minmin-intel <[email protected]>
minmin-intel committed Dec 10, 2024

Verified

This commit was created on GitHub.com and signed with GitHub’s verified signature.
commit b5c0e2aace2a8a362dd24d12245f12117e824fbf
12 changes: 6 additions & 6 deletions tests/agent/sql_agent_test/test_sql_agent.sh
Original file line number Diff line number Diff line change
@@ -20,10 +20,10 @@ export agent_container_name="test-comps-agent-endpoint"

export ip_address=$(hostname -I | awk '{print $1}')

vllm_port=8084
vllm_volume=$WORKPATH #${HF_CACHE_DIR}
echo "vllm volume: $vllm_volume"
ls $vllm_volume
vllm_port=8085
vllm_volume=${HF_CACHE_DIR} #$WORKPATH #${HF_CACHE_DIR}
# echo "vllm volume: $vllm_volume"
# ls $vllm_volume/hub

export model=meta-llama/Meta-Llama-3.1-70B-Instruct
export HUGGINGFACEHUB_API_TOKEN=${HF_TOKEN}
@@ -173,8 +173,8 @@ prepare_data
# echo "Building vllm docker image...."
# build_vllm_docker_images

echo "Launching vllm service...."
start_vllm_service
# echo "Launching vllm service...."
# start_vllm_service

# echo "Generating hints_file..."
# generate_hints_for_benchmark
85 changes: 62 additions & 23 deletions tests/agent/test_agent_langchain_on_intel_hpu.sh
Original file line number Diff line number Diff line change
@@ -6,12 +6,20 @@

WORKPATH=$(dirname "$PWD")
echo $WORKPATH
ls $WORKPATH
echo "========================="
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')
tgi_port=8085
tgi_volume=$WORKPATH/data
vllm_port=8086
vllm_volume=$WORKPATH/data
echo "tgi volume: "
ls $tgi_volume
echo "========================="
if [ -d "$WORKPATH/hub" ]; then
ls "$WORKPATH/hub"
fi
vllm_port=8085
export vllm_volume=$WORKPATH

export WORKPATH=$WORKPATH

@@ -135,10 +143,37 @@ function start_vllm_auto_tool_choice_service() {
echo "Service started successfully"
}

function start_vllm_service_70B() {
  # Launch the vLLM Gaudi serving container for the 70B model across 4 HPU
  # cards, then poll its logs until the server reports ready or we time out.
  # Globals read: HF_TOKEN, model, vllm_port, vllm_volume, LOG_PATH, proxies.
  echo "token is ${HF_TOKEN}"

  echo "start vllm gaudi service"
  echo "**************model is $model**************"
  # 4-card tensor parallelism; VLLM_SKIP_WARMUP shortens startup time.
  docker run -d --runtime=habana --rm --name "test-comps-vllm-gaudi-service" -e HABANA_VISIBLE_DEVICES=0,1,2,3 -p "$vllm_port":80 -v "$vllm_volume":/data -e HF_TOKEN="$HF_TOKEN" -e HF_HOME=/data -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e PT_HPU_ENABLE_LAZY_COLLECTIVES=true -e http_proxy="$http_proxy" -e https_proxy="$https_proxy" -e no_proxy="$no_proxy" -e VLLM_SKIP_WARMUP=true --cap-add=sys_nice --ipc=host opea/vllm-gaudi:comps --model "${model}" --host 0.0.0.0 --port 80 --block-size 128 --max-seq-len-to-capture 16384 --tensor-parallel-size 4
  sleep 5s
  echo "Waiting vllm gaudi ready"
  # Poll up to 100 times (~500s). The original also tested an unset $ready
  # variable, which was always false — the loop only ever exits via the
  # counter or the grep/break below, so the dead condition is removed.
  local n=0
  until [[ "$n" -ge 100 ]]; do
    docker logs test-comps-vllm-gaudi-service &> "${LOG_PATH}/vllm-gaudi-service.log"
    n=$((n+1))
    if grep -q "Uvicorn running on" "${LOG_PATH}/vllm-gaudi-service.log"; then
      break
    fi
    if grep -q "No such container" "${LOG_PATH}/vllm-gaudi-service.log"; then
      echo "container test-comps-vllm-gaudi-service not found"
      exit 1
    fi
    sleep 5s
  done
  sleep 5s
  echo "Service started successfully"
}

function start_react_langchain_agent_service() {
  # Bring up the react_langchain agent microservice via docker compose,
  # give it time to initialize, then dump its logs for the CI record.
  echo "Starting react_langchain agent microservice"
  docker compose -f "$WORKPATH/tests/agent/react_langchain.yaml" up -d
  # Diff residue left both a 5s and a 60s wait; the longer 60s wait is the
  # intended value (agent endpoint needs time to become ready in CI).
  sleep 60s
  docker logs test-comps-agent-endpoint
  echo "Service started successfully"
}
@@ -147,7 +182,7 @@ function start_react_langchain_agent_service() {
function start_react_langgraph_agent_service_openai() {
  # Bring up the react agent microservice backed by OpenAI models.
  # NOTE(review): the log message says "react_langchain" although this is the
  # langgraph/OpenAI variant — kept byte-identical since tests may match it.
  echo "Starting react_langchain agent microservice"
  docker compose -f "$WORKPATH/tests/agent/react_langgraph_openai.yaml" up -d
  # Diff residue left both a 5s and a 60s wait; 60s is the intended value.
  sleep 60s
  docker logs test-comps-agent-endpoint
  echo "Service started successfully"
}
@@ -156,15 +191,15 @@ function start_react_langgraph_agent_service_openai() {
function start_react_llama_agent_service() {
  # Bring up the react-llama agent microservice via docker compose.
  # NOTE(review): message says "react_langgraph" though this is the llama
  # variant — kept byte-identical since CI logs/tests may match on it.
  echo "Starting react_langgraph agent microservice"
  docker compose -f "$WORKPATH/tests/agent/reactllama.yaml" up -d
  # Diff residue left both a 5s and a 60s wait; 60s is the intended value.
  sleep 60s
  docker logs test-comps-agent-endpoint
  echo "Service started successfully"
}

function start_react_langgraph_agent_service_vllm() {
  # Bring up the react_langgraph agent microservice backed by vLLM.
  echo "Starting react_langgraph agent microservice"
  docker compose -f "$WORKPATH/tests/agent/react_vllm.yaml" up -d
  # Diff residue left both a 5s and a 60s wait; 60s is the intended value.
  sleep 60s
  docker logs test-comps-agent-endpoint
  echo "Service started successfully"
}
@@ -180,23 +215,23 @@ function start_planexec_agent_service_vllm() {
function start_ragagent_agent_service() {
  # Bring up the RAG agent microservice via docker compose.
  echo "Starting rag agent microservice"
  docker compose -f "$WORKPATH/tests/agent/ragagent.yaml" up -d
  # Diff residue left both a 5s and a 60s wait; 60s is the intended value.
  sleep 60s
  docker logs test-comps-agent-endpoint
  echo "Service started successfully"
}

function start_ragagent_agent_service_openai() {
  # Bring up the RAG agent microservice backed by OpenAI models.
  echo "Starting rag agent microservice"
  docker compose -f "$WORKPATH/tests/agent/ragagent_openai.yaml" up -d
  # Diff residue left both a 5s and a 60s wait; 60s is the intended value.
  sleep 60s
  docker logs test-comps-agent-endpoint
  echo "Service started successfully"
}

function start_planexec_agent_service_openai() {
  # Bring up the plan-execute agent microservice backed by OpenAI models.
  echo "Starting plan execute agent microservice"
  docker compose -f "$WORKPATH/tests/agent/planexec_openai.yaml" up -d
  # Diff residue left both a 5s and a 60s wait; 60s is the intended value.
  sleep 60s
  docker logs test-comps-agent-endpoint
  echo "Service started successfully"
}
@@ -309,8 +344,8 @@ function validate_sql_agent(){
if [ "$EXIT_CODE" == "1" ]; then
echo "==================SQL Agent logs ======================"
docker logs test-comps-agent-endpoint
echo "================== vllm gaudi service logs ======================"
docker logs test-comps-vllm-gaudi-service
# echo "================== vllm gaudi service logs ======================"
# docker logs test-comps-vllm-gaudi-service
exit 1
fi
}
@@ -320,9 +355,13 @@ function main() {
stop_agent_docker
stop_docker
build_docker_images
build_vllm_docker_images

# ==================== Tests with 70B model ====================
# RAG agent, react_llama, react_langchain, assistant apis

# ==================== TGI tests ====================
start_tgi_service
# start_tgi_service
start_vllm_service_70B

# test rag agent
start_ragagent_agent_service
@@ -331,26 +370,30 @@ function main() {
stop_agent_docker
echo "============================================="

# test react_llama
# # test react_llama
start_react_llama_agent_service
echo "===========Testing ReAct Llama ============="
validate_microservice
stop_agent_docker
echo "============================================="


# test react_langchain
# # test react_langchain
start_react_langchain_agent_service
echo "=============Testing ReAct Langchain============="
validate_microservice_streaming
validate_assistant_api
stop_agent_docker
echo "============================================="

stop_tgi_docker
# stop_tgi_docker

# ==================== VLLM tests ====================
build_vllm_docker_images
# test sql agent
validate_sql_agent

stop_docker

# # # ==================== Test react_langgraph with vllm auto-tool-choice ====================

export model=mistralai/Mistral-7B-Instruct-v0.3
export LLM_MODEL_ID=${model}
@@ -366,6 +409,7 @@ function main() {
stop_vllm_docker
echo "============================================="

# # # ==================== Test plan-execute agent with vllm guided decoding ====================
# test plan execute with vllm - Mistral
# start_vllm_service
# start_planexec_agent_service_vllm
@@ -407,11 +451,6 @@ function main() {
# validate_microservice
# stop_agent_docker

stop_docker
mv $WORKPATH/data $WORKPATH/hub
# test sql agent
validate_sql_agent
mv $WORKPATH/hub $WORKPATH/data
stop_docker

echo y | docker system prune 2>&1 > /dev/null