From 8b09b79975a0f233da5faa20ab23f6bf6cd5453d Mon Sep 17 00:00:00 2001
From: "Wang, Xigui"
Date: Fri, 9 Aug 2024 17:20:12 +0800
Subject: [PATCH] LLM micro service: extract input model name

No input model is provided from the UI. vLLM and OLLAMA get the model
name from the environment; TGI mode gets the model from ModelID.

Signed-off-by: Wang, Xigui
---
 comps/llms/text-generation/ollama/llm.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/comps/llms/text-generation/ollama/llm.py b/comps/llms/text-generation/ollama/llm.py
index 5374cfa69d..aadb2e2faf 100644
--- a/comps/llms/text-generation/ollama/llm.py
+++ b/comps/llms/text-generation/ollama/llm.py
@@ -21,7 +21,7 @@ def llm_generate(input: LLMParamsDoc):
     ollama = Ollama(
         base_url=ollama_endpoint,
-        model=input.model,
+        model=input.model if input.model else model_name,
         num_predict=input.max_new_tokens,
         top_k=input.top_k,
         top_p=input.top_p,
@@ -49,4 +49,5 @@ async def stream_generator():
 
 if __name__ == "__main__":
     ollama_endpoint = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
+    model_name = os.getenv("OLLAMA_MODEL", "meta-llama/Meta-Llama-3-8B-Instruct")
     opea_microservices["opea_service@llm_ollama"].start()
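
A minimal sketch of the fallback this patch introduces, assuming only the Python standard library (the Ollama client and the microservice wiring from llm.py are omitted, and resolve_model is a hypothetical helper used purely for illustration): the model named in the request wins when set; otherwise the OLLAMA_MODEL environment variable, or its built-in default, is used.

    import os

    # Read the service-level default once at startup, as the patch does under
    # __main__. The default string matches the one chosen in the patch.
    model_name = os.getenv("OLLAMA_MODEL", "meta-llama/Meta-Llama-3-8B-Instruct")

    def resolve_model(requested):
        # Prefer the model named in the request; otherwise fall back to the
        # environment-derived default. Mirrors the patched line:
        #   model=input.model if input.model else model_name
        return requested if requested else model_name

    # Usage: a request without a model falls back to OLLAMA_MODEL (or the default).
    print(resolve_model(None))         # -> value of OLLAMA_MODEL or the default
    print(resolve_model("llama3:8b"))  # -> "llama3:8b"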