From 6d6f7fab15a2ededd29318ba275b13d88f48ba5d Mon Sep 17 00:00:00 2001
From: vansangpfiev
Date: Tue, 9 Jul 2024 10:43:17 +0700
Subject: [PATCH] fix: build

---
 .../scripts/e2e-test-whisper-linux-and-mac.sh | 93 +++++++++++++++++++
 Makefile                                      | 25 -----
 examples/server/server.cc                     |  6 +-
 3 files changed, 96 insertions(+), 28 deletions(-)
 create mode 100644 .github/scripts/e2e-test-whisper-linux-and-mac.sh

diff --git a/.github/scripts/e2e-test-whisper-linux-and-mac.sh b/.github/scripts/e2e-test-whisper-linux-and-mac.sh
new file mode 100644
index 0000000..8cb588c
--- /dev/null
+++ b/.github/scripts/e2e-test-whisper-linux-and-mac.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+## Example run command
+# ./linux-and-mac.sh './jan/plugins/@janhq/inference-plugin/dist/nitro/nitro_mac_arm64' https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF/resolve/main/tinyllama-1.1b-chat-v0.3.Q2_K.gguf
+
+# Check for required arguments
+if [[ $# -ne 2 ]]; then
+    echo "Usage: $0 <path_to_binary> <url_to_download_model>"
+    exit 1
+fi
+
+rm /tmp/response1.log /tmp/response2.log /tmp/nitro.log
+
+BINARY_PATH=$1
+DOWNLOAD_URL=$2
+
+# Random port to ensure it's not used
+min=10000
+max=11000
+range=$((max - min + 1))
+PORT=$((RANDOM % range + min))
+
+# Start the binary file
+"$BINARY_PATH" 1 127.0.0.1 $PORT >/tmp/nitro.log &
+
+# Get the process id of the binary file
+pid=$!
+
+if ! ps -p $pid >/dev/null; then
+    echo "nitro failed to start. Logs:"
+    cat /tmp/nitro.log
+    exit 1
+fi
+
+# Wait for a few seconds to let the server start
+sleep 5
+
+# Check if /tmp/testwhisper exists, if not, download it
+if [[ ! -f "/tmp/testwhisper" ]]; then
+    curl --connect-timeout 300 "$DOWNLOAD_URL" --output /tmp/testwhisper
+fi
+
+# Run the curl commands
+response1=$(curl --connect-timeout 60 -o /tmp/response1.log -s -w "%{http_code}" --location "http://127.0.0.1:$PORT/loadmodel" \
+    --header 'Content-Type: application/json' \
+    --data '{
+    "model_path": "/tmp/testwhisper",
+    "model_id": "whisper.cpp"
+}')
+
+response2=$(
+    curl --connect-timeout 60 -o /tmp/response2.log -s -w "%{http_code}" --location "http://127.0.0.1:$PORT/v1/audio/transcriptions" \
+        --header 'Access-Control-Allow-Origin: *' \
+        --form 'file=@"../whisper.cpp/samples/jfk.wav"' \
+        --form 'model_id="whisper.cpp"' \
+        --form 'temperature="0.0"' \
+        --form 'prompt="The transcript is about OpenAI which makes technology like DALL·E, GPT-3, and ChatGPT with the hope of one day building an AGI system that benefits all of humanity. The president is trying to rally people to support the cause."'
+)
+
+error_occurred=0
+if [[ "$response1" -ne 200 ]]; then
+    echo "The first curl command failed with status code: $response1"
+    cat /tmp/response1.log
+    error_occurred=1
+fi
+
+if [[ "$response2" -ne 200 ]]; then
+    echo "The second curl command failed with status code: $response2"
+    cat /tmp/response2.log
+    error_occurred=1
+fi
+
+if [[ "$error_occurred" -eq 1 ]]; then
+    echo "cortex.audio test run failed!"
+    echo "cortex.audio Error Logs:"
+    cat /tmp/nitro.log
+    kill $pid
+    exit 1
+fi
+
+echo "----------------------"
+echo "Log load model:"
+cat /tmp/response1.log
+
+echo "----------------------"
+echo "Log run test:"
+cat /tmp/response2.log
+
+echo "cortex.audio test ran successfully!"
+
+# Kill the server process
+kill $pid
diff --git a/Makefile b/Makefile
index 200d778..6862bea 100644
--- a/Makefile
+++ b/Makefile
@@ -104,29 +104,4 @@ else
 	cd examples/server/build/; \
 	cp ../../../build/libengine.dylib engines/cortex.audio/; \
 	chmod +x ../../../.github/scripts/e2e-test-server-linux-and-mac.sh && ../../../.github/scripts/e2e-test-server-linux-and-mac.sh ./server $(LLM_MODEL_URL) $(EMBEDDING_MODEL_URL);
-endif
-
-run-e2e-submodule-test:
-ifeq ($(RUN_TESTS),false)
-	@echo "Skipping tests"
-	@exit 0
-endif
-ifeq ($(OS),Windows_NT)
-	@powershell -Command "python -m pip install --upgrade pip"
-	@powershell -Command "python -m pip install requests;"
-	@powershell -Command "mkdir -p examples\server\build\engines\cortex.audio; cd examples\server\build; cp ..\..\..\build\engine.dll engines\cortex.audio; python ..\..\..\.github\scripts\e2e-test-server.py server $(LLM_MODEL_URL) $(EMBEDDING_MODEL_URL);"
-else ifeq ($(shell uname -s),Linux)
-	python -m pip install --upgrade pip;
-	python -m pip install requests;
-	@mkdir -p examples/server/build/engines/cortex.audio; \
-	cd examples/server/build/; \
-	cp ../../../build/libengine.so engines/cortex.audio/; \
-	python ../../../.github/scripts/e2e-test-server.py server $(LLM_MODEL_URL) $(EMBEDDING_MODEL_URL);
-else
-	python -m pip install --upgrade pip;
-	python -m pip install requests;
-	@mkdir -p examples/server/build/engines/cortex.audio; \
-	cd examples/server/build/; \
-	cp ../../../build/libengine.dylib engines/cortex.audio/; \
-	python ../../../.github/scripts/e2e-test-server.py server $(LLM_MODEL_URL) $(EMBEDDING_MODEL_URL);
 endif
\ No newline at end of file
diff --git a/examples/server/server.cc b/examples/server/server.cc
index 7c5ff50..fdb0245 100644
--- a/examples/server/server.cc
+++ b/examples/server/server.cc
@@ -177,7 +177,7 @@ int main(int argc, char** argv) {
           LOG_ERROR << "Save file failed";
         }
         (*req_body)[id] = temp_file_path;
-      } else if (id == "model") {
+      } else {
        (*req_body)[id] = f.content;
       }
     }
@@ -239,8 +239,8 @@ int main(int argc, char** argv) {
   svr->Post("/loadmodel", handle_load_model);
   // Use POST since httplib does not read request body for GET method
   svr->Post("/unloadmodel", handle_unload_model);
-  svr->Post("/audio/transcriptions", handle_transcriptions);
-  svr->Post("/audio/translations", handle_translations);
+  svr->Post("/v1/audio/transcriptions", handle_transcriptions);
+  svr->Post("/v1/audio/translations", handle_translations);
   svr->Post("/modelstatus", handle_get_model_status);
   svr->Get("/models", handle_get_running_models);
   std::atomic<bool> running = true;
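
---

For reference, a minimal sketch of how the new e2e script can be invoked locally,
assuming the server binary was built into examples/server/build as in the Makefile
target above; WHISPER_MODEL_URL is a placeholder for a downloadable whisper GGML
model, not part of the patch:

    chmod +x .github/scripts/e2e-test-whisper-linux-and-mac.sh
    # WHISPER_MODEL_URL is hypothetical; point it at any whisper GGML model file.
    ./.github/scripts/e2e-test-whisper-linux-and-mac.sh \
        ./examples/server/build/server \
        "$WHISPER_MODEL_URL"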
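The script exercises only the renamed transcription route. A hand-run check of the
renamed /v1/audio/translations route might look like the sketch below, assuming the
translation handler accepts the same multipart fields as the transcription handler
(the port and sample path are illustrative):

    # Hypothetical manual check; assumes a model was already loaded via /loadmodel.
    curl --connect-timeout 60 -s -w "%{http_code}" \
        --location "http://127.0.0.1:$PORT/v1/audio/translations" \
        --form 'file=@"../whisper.cpp/samples/jfk.wav"' \
        --form 'model_id="whisper.cpp"'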