-
Notifications
You must be signed in to change notification settings - Fork 0
/
llm.py
29 lines (24 loc) · 1.06 KB
/
llm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
from dotenv import load_dotenv, find_dotenv
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain.chains import LLMChain
load_dotenv(find_dotenv())
class LLM:
    """LLM class for a RAG pipeline, applied after re-ranking documents.

    Wraps an OpenAI chat model and renders a fixed prompt that grounds the
    model's answer in retrieved context.
    """

    def __init__(self, model: str, temperature: float) -> None:
        """Store the model configuration.

        Args:
            model: OpenAI chat model name (e.g. ``"gpt-4o-mini"``).
            temperature: Sampling temperature forwarded to ``ChatOpenAI``
                (a float such as 0.0-1.0, not an int).
        """
        self.model = model
        self.temperature = temperature

    def generate(self, query: str, context: str) -> str:
        """Generate a response from the OpenAI LLM using the given context.

        Args:
            query: The user question to answer.
            context: Retrieved (re-ranked) document text used as grounding.

        Returns:
            The model's answer text.
        """
        summary = """
        You're an assistant to answer questions using the given context.
        Context: {context}
        Answer the following question: {query}
        """
        llm = ChatOpenAI(temperature=self.temperature, model=self.model)
        # FIX: the template interpolates both {context} and {query}; declaring
        # only "context" made PromptTemplate's variable validation fail at
        # runtime when "query" was supplied to invoke().
        prompt_template = PromptTemplate(
            input_variables=["context", "query"], template=summary
        )
        chain = LLMChain(llm=llm, prompt=prompt_template)
        result = chain.invoke(input={"context": context, "query": query})
        return result["text"]