forked from trancethehuman/hr-gpt
-
Notifications
You must be signed in to change notification settings - Fork 0
/
ai_agents.py
48 lines (37 loc) · 1.64 KB
/
ai_agents.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import os
from dotenv import load_dotenv
from typing import List
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from consts import llm_model_type
# Load environment variables from a local .env file (e.g. OPENAI_API_KEY).
load_dotenv()

# LLM initialization.
# Fix: `openai_api_key` was previously assigned but never used — ChatOpenAI
# silently re-read the environment itself. Pass the key explicitly so the
# configuration is visible and the variable has a purpose.
openai_api_key = os.getenv("OPENAI_API_KEY")
llm = ChatOpenAI(  # type: ignore
    openai_api_key=openai_api_key,
    max_retries=3,   # retry transient OpenAI API failures up to 3 times
    temperature=0,   # deterministic completions for tool-using agents
    model_name=llm_model_type,
)
def get_agent_zero_shot_response(query: str, tools: List, messages_history: List, is_agent_verbose: bool = False, max_iterations: int = 3, return_thought_process: bool = False):
    """
    Run a conversational LangChain agent over `query` and return its answer.

    Parameters
    ----------
    query : str
        The user's input for the agent to answer.
    tools : List
        LangChain tools the agent may invoke while reasoning.
    messages_history : List
        Prior turns as dicts with keys 'type' ('user' or 'AI') and
        'message'; they are replayed into the agent's conversation memory.
    is_agent_verbose : bool
        When True, the agent prints its intermediate reasoning steps.
    max_iterations : int
        Upper bound on agent reasoning/tool-use iterations.
    return_thought_process : bool
        When True, the chain also returns intermediate steps in its result.

    Returns
    -------
    The agent's final answer (the chain result's "output" value).
    """
    memory = ConversationBufferMemory(memory_key="chat_history")

    # Replay prior conversation turns into memory.
    # Fix: the two sender checks are mutually exclusive, so use if/elif
    # instead of two independent ifs. Entries with any other 'type' are
    # skipped silently, as before.
    for message_dict in messages_history:
        message_value = message_dict['message']
        sender = message_dict['type']
        if sender == 'user':
            memory.chat_memory.add_user_message(message_value)
        elif sender == 'AI':
            memory.chat_memory.add_ai_message(message_value)

    # Build a conversational ReAct agent around the shared module-level LLM.
    agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
        verbose=is_agent_verbose,
        max_iterations=max_iterations,
        return_intermediate_steps=return_thought_process,
        memory=memory,
    )

    # Run the chain and extract the final answer.
    result = agent({"input": query})
    answer = result["output"]

    # Debug: echo the agent's answer to the console (kept from original).
    print(answer)

    return answer