An Agency
is a collection of Agents that can communicate with one another.
Here are the primary benefits of using an Agency, instead of an individual agent:
+Scalability: As the complexity of your integration increases, you can keep adding more and more agents.
+Tip
+It is recommended to start with as few agents as possible, fine-tune them until they are working as expected, and only then add new agents to the agency. If you add too many agents at first, it will be difficult to debug and understand what is going on.
+Unlike all other frameworks, communication flows in Agency Swarm are not hierarchical or sequential. Instead, they are uniform. You can define them however you want. But keep in mind that they are established from left to right inside the agency_chart
. So, in the example below, the CEO can initiate communication and send tasks to the Developer and the Virtual Assistant, and they can respond to the CEO in the same thread, but the Developer or the VA cannot initiate a conversation and assign tasks to the CEO. You can add as many levels of communication as you want.
from agency_swarm import Agency
+
+agency = Agency([
+ ceo, dev,  # CEO and Developer will be the entry points for communication with the user
+ [ceo, dev], # CEO can initiate communication with Developer
+ [ceo, va], # CEO can initiate communication with Virtual Assistant
+ [dev, va] # Developer can initiate communication with Virtual Assistant
+])
+
All agents added inside the top-level list of agency_chart
without being part of a second list can talk to the user.
To stream the conversation between agents, you can use the get_completion_stream
method with your event handler like below. The process is extremely similar to the one in the official documentation.
The only difference is that you must extend the AgencyEventHandler
class, which has 2 additional properties: agent_name
and recipient_agent_name
, to get the names of the agents communicating with each other. (See the on_text_created
below.)
from typing_extensions import override
+from agency_swarm import AgencyEventHandler
+
+class EventHandler(AgencyEventHandler):
+ @override
+ def on_text_created(self, text) -> None:
+ # get the names of the agents communicating with each other
+ print(f"\n{self.recipient_agent_name} @ {self.agent_name} > ", end="", flush=True)
+
+ @override
+ def on_text_delta(self, delta, snapshot):
+ print(delta.value, end="", flush=True)
+
+ def on_tool_call_created(self, tool_call):
+ print(f"\n{self.recipient_agent_name} > {tool_call.type}\n", flush=True)
+
+ def on_tool_call_delta(self, delta, snapshot):
+ if delta.type == 'code_interpreter':
+ if delta.code_interpreter.input:
+ print(delta.code_interpreter.input, end="", flush=True)
+ if delta.code_interpreter.outputs:
+ print(f"\n\noutput >", flush=True)
+ for output in delta.code_interpreter.outputs:
+ if output.type == "logs":
+ print(f"\n{output.logs}", flush=True)
+
+ @classmethod
+ def on_all_streams_end(cls):
+ print("\n\nAll streams have ended.") # Conversation is over and message is returned to the user.
+
+response = agency.get_completion_stream("I want you to build me a website", event_handler=EventHandler)
+
Also, there is an additional class method on_all_streams_end
which is called when all streams have ended. This method is needed because, unlike in the official documentation, your event handler will be called multiple times, likely by multiple agents.
When it comes to asynchronous execution, there are 2 modes you can use at the moment: threading
, tools_threading
.
If you would like to use asynchronous communication between agents, you can set the async_mode
parameter to threading
. This is useful when you don't want to wait for a response from an agent, for example, when a task takes a long time to complete.
With this mode, the response from the SendMessage
tool will be returned instantly as a system notification with a status update. The recipient agent will then continue to execute the task in the background. The caller agent can check the status (if task is in progress) or the response (if the task is completed) with the GetResponse
tool.
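For example, a minimal sketch (reusing the ceo and dev agents from the example above):
agency = Agency([
+    ceo,
+    [ceo, dev],
+], async_mode='threading')  # sub-agents now run in background threads
+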
If you would like to use asynchronous execution for tools, you can set the async_mode
parameter to tools_threading
. With this mode on, all tools will be executed concurrently in separate threads, which can significantly speed up the workflow of I/O-bound tasks.
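For example, a minimal sketch (same agents assumed):
agency = Agency([
+    ceo,
+    [ceo, dev],
+], async_mode='tools_threading')  # all tool calls run concurrently in separate threads
+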
You can add shared files for all agents in the agency by specifying a folder path in a shared_files
parameter. This is useful for sharing common resources that all agents need to access.
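For example (the folder name is only an illustration):
agency = Agency([ceo], shared_files='shared_files')  # files in ./shared_files are available to every agent
+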
If you would like to use a different file path for the settings, other than default settings.json
, you can specify a settings_path
parameter. All your agent states will then be saved and loaded from this file. If this file does not exist, it will be created, along with new Assistants on your OpenAI account.
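For example (the file name is arbitrary):
agency = Agency([ceo], settings_path='my_settings.json')  # agent states are saved to and loaded from this file
+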
You can also specify parameters like temperature
, top_p
, max_completion_tokens
, max_prompt_tokens
and truncation_strategy
for the entire agency. These parameters will be used as default values for all agents in the agency; however, you can still override them for individual agents by specifying them in the agent's constructor.
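For example, a minimal sketch of agency-level defaults (the values are illustrative; the truncation_strategy dict follows the OpenAI Assistants API format):
agency = Agency(
+    [ceo, [ceo, dev]],
+    temperature=0.3,
+    top_p=1.0,
+    max_prompt_tokens=25000,
+    max_completion_tokens=4000,
+    truncation_strategy={"type": "last_messages", "last_messages": 10},
+)
+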
When it comes to running the agency, you have 3 options:
+response = agency.get_completion("I want you to build me a website",
+ additional_instructions="This is an additional instruction for the task.",
+ tool_choice={"type": "function", "function": {"name": "SendMessage"}},
+ attachments=[],
+ recipient_agent=dev,
+ )
+print(response)
+
Params like additional_instructions
, tool_choice
, and attachments
are optional. You can also specify the recipient_agent
parameter to send the message to a specific agent.
To talk to one of the top-level agents when running the agency from your terminal, you can use the mentions feature, similar to how you would use it inside ChatGPT. Simply mention the agent name in the message, like @Developer I want you to build me a website
. The message will then be sent to the Developer agent, instead of the CEO. You can also use tab to autocomplete the agent name after the @
symbol.
If you would like to delete the agency and all its agents with all associated files and vector stores, you can use the delete
method.
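For example:
agency.delete()  # removes all agents, associated files, and vector stores
+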
Agents are essentially wrappers for Assistants in OpenAI Assistants API. The Agent
class contains a lot of convenience methods to help you manage the state of your assistant, upload files, attach tools, and more.
All parameters inside the Agent class primarily follow the same structure as OpenAI's Assistants API. However, there are a few additional parameters that you can use to customize your agent.
+You can specify whether to run tools in parallel or sequentially by setting the parallel_tool_calls
parameter. By default, this parameter is set to True
.
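To disable parallel execution, set the parameter to False (a minimal sketch; the agent name is arbitrary):
from agency_swarm import Agent
+
+agent = Agent(name='MyAgent', parallel_tool_calls=False)
+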
Now, the agent will run all tools sequentially.
+You can also specify the file search configuration for the agent, as described in the OpenAI documentation. Right now, only max_num_results
is supported.
from agency_swarm import Agent
+
+agent = Agent(name='MyAgent', file_search={'max_num_results': 25}) # must be between 1 and 50
+
You can specify the folder where the agent will look for OpenAPI schemas to convert into tools. Additionally, you can add api_params
and api_headers
to the schema to pass additional parameters and headers to the API call.
from agency_swarm import Agent
+
+agent = Agent(name='MyAgent',
+ schemas_folder='schemas',
+ api_params={'my_schema.json': {'param1': 'value1'}},
+ api_headers={'my_schema.json': {'Authorization': 'Bearer token'}}
+ )
+
Note
+The schemas folder automatically converts any OpenAPI schemas into BaseTools. This means that your agents will type-check all the API parameters before calling the API, which significantly reduces the chance of errors.
+You can use any previously fine-tuned model by specifying the model
parameter in the agent.
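For example (the fine-tuned model ID below is only a placeholder):
from agency_swarm import Agent
+
+agent = Agent(name='MyAgent', model='ft:gpt-4o-mini-2024-07-18:my-org::abc123')
+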
You can also provide a response validator function to validate the response before sending it to the user or another agent. This function should raise an error if the response is invalid.
+from agency_swarm import Agent
+
+class MyAgent(Agent):
+ def response_validator(self, message: str) -> str:
+ """This function is used to validate the response before sending it to the user or another agent."""
+ if "bad word" in message:
+ raise ValueError("Please don't use bad words.")
+
+ return message
+
+You can now also provide few-shot examples for each agent. These examples help the agent understand how to respond. The format for examples follows the message object format in the OpenAI API:
+examples=[
+ {
+ "role": "user",
+ "content": "Hi!",
+ "attachments": [],
+ "metadata": {},
+ },
+ {
+ "role": "assistant",
+ "content": "Hi! I am the CEO. I am here to help you with your tasks. Please tell me what you need help with.",
+ "attachments": [],
+ "metadata": {},
+ }
+]
+
+agent.examples = examples
+
or you can provide them when initializing the agent in the init method:
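For example, a minimal sketch using the examples list defined above:
agent = Agent(name="CEO", examples=examples)
+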
+ +When it comes to creating your agent, you have 3 options:
+To define your agent in the code, you can simply instantiate the Agent
class and pass the required parameters.
from agency_swarm import Agent
+
+agent = Agent(name="My Agent",
+ description="This is a description of my agent.",
+ instructions="These are the instructions for my agent.",
+ tools=[ToolClass1, ToolClass2],
+ temperature=0.3,
+ max_prompt_tokens=25000
+ )
+
This CLI command simplifies the process of creating a structured environment for each agent.
+agency-swarm create-agent-template --name "AgentName" --description "Agent Description" [--path "/path/to/directory"] [--use_txt]
+
When you run the create-agent-template
command, it creates the following folder structure for your agent:
/your-specified-path/
+│
+├── agency_manifesto.md or .txt # Agency's guiding principles (created if not exists)
+└── AgentName/ # Directory for the specific agent
+ ├── files/ # Directory for files that will be uploaded to openai
+ ├── schemas/ # Directory for OpenAPI schemas to be converted into tools
+ ├── tools/ # Directory for tools to be imported by default.
+ ├── AgentName.py # The main agent class file
+ ├── __init__.py # Initializes the agent folder as a Python package
+ └── instructions.md or .txt # Instruction document for the agent
+
files
: This folder is used to store files that will be uploaded to OpenAI. You can use any of the acceptable file formats. After a file is uploaded, its id is appended to the file name to avoid re-uploading the same file twice.schemas
: This folder is used to store OpenAPI schemas that will be converted into tools automatically. All you have to do is put the schema in this folder, and specify it when initializing your agent.tools
: This folder is used to store tools in the form of Python files. Each file must have the same name as the tool class for it to be imported by default. For example, ExampleTool.py
must contain a class called ExampleTool
.The AgentName.py
file will contain the following code:
from agency_swarm.agents import Agent
+
+class AgentName(Agent):
+ def __init__(self):
+ super().__init__(
+ name="agent_name",
+ description="agent_description",
+ instructions="./instructions.md",
+ files_folder="./files",
+ schemas_folder="./schemas",
+ tools_folder="./tools",
+ temperature=0.3,
+ max_prompt_tokens=25000,
+ examples=[]
+ )
+
+ def response_validator(self, message: str) -> str:
+ """This function is used to validate the response before sending it to the user or another agent."""
+ if "bad word" in message:
+ raise ValueError("Please don't use bad words.")
+
+ return message
+
To initialize the agent, you can simply import the agent and instantiate it:
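For example, a minimal sketch (assuming the generated AgentName package is on your Python path):
from AgentName import AgentName
+
+agent = AgentName()
+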
+ +For the most complex and requested use cases, we will be creating premade agents that you can import and reuse in your own projects. To import an existing agent, you can run the following CLI command:
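A sketch of the import command (the subcommand and flags are assumptions, and "Devid" is just an example agent name; check the CLI help for the exact syntax):
agency-swarm import-agent --name "Devid" --destination "./"
+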
+ +This will copy all your agent source files locally. You can then import the agent as shown above. To check available agents, simply run this command without any arguments.
+ + + + + + + + + + + + + +Many organizations are concerned about data privacy and sharing their data with OpenAI. However, using Azure ensures that your data is processed in a secure environment, allowing you to utilize the OpenAI API without even sharing data with OpenAI itself.
+Before you begin, ensure that you have the following:
+To use Azure OpenAI, you need to replace the OpenAI client with the AzureOpenAI client. Here is an example of how to do this in Agency Swarm:
import os
+from openai import AzureOpenAI
+from agency_swarm import set_openai_client
+
+client = AzureOpenAI(
+ api_key=os.getenv("AZURE_OPENAI_KEY"),
+ api_version="2024-02-15-preview",
+ azure_endpoint=os.getenv("AZURE_ENDPOINT"),
+ timeout=5,
+ max_retries=5,
+)
+
+set_openai_client(client)
+
Then, you also have to replace the model
parameter inside each agent with your model deployment name from Azure. Here is an example of how you can do it:
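For example (replace the model value with your actual Azure deployment name):
from agency_swarm import Agent
+
+ceo = Agent(name="ceo", description="I am the CEO", model="your-azure-deployment-name")
+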
Then, you can run your agency as usual:
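For example, a minimal sketch (demo_gradio or get_completion work the same way):
agency = Agency([ceo])
+
+agency.run_demo()  # terminal demo
+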
+ +Retrieval is not supported yet
+Currently, Azure OpenAI does not support the Retrieval
tool. You can only use CodeInterpreter
or custom tools made with the BaseTool
class.
You can find an example notebook for using Azure OpenAI in the notebooks folder.
+ + + + + + + + + + + + + +Multi-agent communication is the core functionality of any Multi-Agent System. Unlike in all other frameworks, Agency Swarm not only allows you to define communication flows in any way you want (uniform communication flows), but to also configure the underlying logic for this feature. This means that you can create entirely new types of communication, or adjust it to your own needs. Below you will find a guide on how to do all this, along with some common examples.
+Agency Swarm contains multiple commonly requested classes for communication flows. Currently, the following classes are available:
+Class Name | +Description | +When to Use | +Code Link | +
---|---|---|---|
SendMessage (default) |
+This is the default class for sending messages to other agents. It uses synchronous communication with basic COT (Chain of Thought) prompting and allows agents to relay files and modify system instructions for each other. | +Suitable for most use cases. Balances speed and functionality. | +link | +
SendMessageQuick |
+A variant of the SendMessage class without Chain of Thought prompting, files, and additional instructions. It allows for faster communication without the overhead of COT. | +Use for simpler use cases or when you want to save tokens and increase speed. | +link | +
SendMessageAsyncThreading |
+Similar to SendMessage but with async_mode='threading' . Each agent will execute asynchronously in a separate thread. In the meantime, the caller agent can continue the conversation with the user and check the results later. |
+Use for asynchronous applications or when sub-agents take singificant amounts of time to complete their tasks. | +link | +
SendMessageSwarm |
+Instead of sending a message to another agent, it replaces the caller agent with the recipient agent, similar to OpenAI's Swarm. The recipient agent will then have access to the entire conversation. | +When you need more granular control. It is not able to handle complex multi-step, multi-agent tasks. | +link | +
To use any of the pre-made SendMessage
classes, simply put it in the send_message_tool_class
parameter when initializing the Agency
class:
from agency_swarm.tools.send_message import SendMessageQuick
+
+agency = Agency(
+ ...
+ send_message_tool_class=SendMessageQuick
+)
+
That's it! Now, your agents will use the SendMessageQuick
class for communication.
To create your own communication flow, you will first need to extend the SendMessageBase
class. This class extends the BaseTool
class, like any other tool in Agency Swarm, and contains the most basic parameters required for communication, such as the recipient_agent
.
SendMessage
Class
By default, Agency Swarm uses the following tool for communication:
+from typing import Optional, List
+from pydantic import Field, field_validator, model_validator
+from .SendMessageBase import SendMessageBase
+
+class SendMessage(SendMessageBase):
+ """Use this tool to facilitate direct, synchronous communication between specialized agents within your agency. When you send a message using this tool, you receive a response exclusively from the designated recipient agent. To continue the dialogue, invoke this tool again with the desired recipient agent and your follow-up message. Remember, communication here is synchronous; the recipient agent won't perform any tasks post-response. You are responsible for relaying the recipient agent's responses back to the user, as the user does not have direct access to these replies. Keep engaging with the tool for continuous interaction until the task is fully resolved. Do not send more than 1 message to the same recipient agent at the same time."""
+ my_primary_instructions: str = Field(
+ ...,
+ description=(
+ "Please repeat your primary instructions step-by-step, including both completed "
+ "and the following next steps that you need to perform. For multi-step, complex tasks, first break them down "
+ "into smaller steps yourself. Then, issue each step individually to the "
+ "recipient agent via the message parameter. Each identified step should be "
+ "sent in a separate message. Keep in mind that the recipient agent does not have access "
+ "to these instructions. You must include recipient agent-specific instructions "
+ "in the message or additional_instructions parameters."
+ )
+ )
+ message: str = Field(
+ ...,
+ description="Specify the task required for the recipient agent to complete. Focus on clarifying what the task entails, rather than providing exact instructions. Make sure to inlcude all the relevant information needed to complete the task."
+ )
+ message_files: Optional[List[str]] = Field(
+ default=None,
+ description="A list of file IDs to be sent as attachments to this message. Only use this if you have the file ID that starts with 'file-'.",
+ examples=["file-1234", "file-5678"]
+ )
+ additional_instructions: Optional[str] = Field(
+ default=None,
+ description="Additional context or instructions from the conversation needed by the recipient agent to complete the task."
+ )
+
+ @model_validator(mode='after')
+ def validate_files(self):
+ # prevent hallucinations with agents sending file IDs into incorrect fields
+ if "file-" in self.message or (self.additional_instructions and "file-" in self.additional_instructions):
+ if not self.message_files:
+ raise ValueError("You must include file IDs in message_files parameter.")
+ return self
+
+
+ def run(self):
+ return self._get_completion(message=self.message,
+ message_files=self.message_files,
+ additional_instructions=self.additional_instructions)
+
Let's break down the code.
+In general, all SendMessage
tools have the following components:
message
, message_files
, additional_instructions
are used to provide the recipient agent with the necessary information.run
method: This is where the communication logic is implemented. Most of the time, you just need to map your parameters to self._get_completion()
the same way you would call it in the agency.get_completion()
method.When creating your own SendMessage
tools, you can use the above components as a template.
In the following sections, we'll look at some common use cases for extending the SendMessageBase
tool and how to implement them, so you can learn how to create your own SendMessage tools and use them in your own applications.
The most basic use case is if you want to use your own parameter descriptions, such as if you want to change the docstring or the description of the message
parameter. This can help you better customize how the agents communicate with each other and what information they relay.
Let's say that instead of sending messages, I want my agents to send tasks to each other. In this case, I can change the docstring and the message
parameter to a task
parameter to better fit the nature of my application.
from pydantic import Field
+from agency_swarm.tools.send_message import SendMessageBase
+
+class SendMessageTask(SendMessageBase):
+ """Use this tool to send tasks to other agents within your agency."""
+ chain_of_thought: str = Field(
+ ...,
+ description="Please think step-by-step about how to solve your current task, provided by the user. Then, break down this task into smaller steps and issue each step individually to the recipient agent via the task parameter."
+ )
+ task: str = Field(
+ ...,
+ description="Specify the task required for the recipient agent to complete. Focus on clarifying what the task entails, rather than providing exact instructions. Make sure to inlcude all the relevant information needed to complete the task."
+ )
+
+ def run(self):
+ return self._get_completion(message=self.task)
+
To remove the chain of thought, you can simply remove the chain_of_thought
parameter.
Now, let's say that I need to ensure that my message is sent to the correct recipient agent. (This is a very common hallucination in production.) In this case, I can add a custom validator to the recipient
parameter, which is defined in the SendMessageBase
class. Since I don't want to change any other parameters or descriptions, I can inherit the default SendMessage
class and only add this new validation logic.
from agency_swarm.tools.send_message import SendMessage
+from pydantic import model_validator
+
+class SendMessageValidation(SendMessage):
+ @model_validator(mode='after')
+ def validate_recipient(self):
+ if "customer support" not in self.message.lower() and self.recipient == "CustomerSupportAgent":
+ raise ValueError("Messages not related to customer support cannot be sent to the customer support agent.")
+ return self
+
You can, of course, also use GPT for this:
+from agency_swarm.tools.send_message import SendMessage
+from agency_swarm.util.validators import llm_validator
+from pydantic import model_validator
+
+class SendMessageLLMValidation(SendMessage):
+ @model_validator(mode='after')
+ def validate_recipient(self):
+ if self.recipient == "CustomerSupportAgent":
+ llm_validator(
+ statement="The message is related to customer support."
+ )(self.message)
+ return self
+
In this example, the llm_validator
will throw an error if the message is not related to customer support. The caller agent will then have to fix the recipient or the message and send it again! This is extremely useful when you have a lot of agents.
Sometimes, when using default SendMessage
, the agents might not relay all the necessary details to the recipient agent, especially when the previous conversation is too long. In this case, you can summarize the previous conversation with GPT and add it to the context instead of the additional instructions. I will extend the SendMessageQuick
class, which already contains the message
parameter, as I don't need chain of thought or files in this case.
from agency_swarm.tools.send_message import SendMessageQuick
+from agency_swarm.util.oai import get_openai_client
+
+class SendMessageSummary(SendMessageQuick):
+ def run(self):
+ client = get_openai_client()
+ thread = self._get_main_thread() # get the main thread (conversation with the user)
+
+ # get the previous messages
+ previous_messages = thread.get_messages()
+ previous_messages_str = "\n".join([f"{m.role}: {m.content[0].text.value}" for m in previous_messages])
+
+ # summarize the previous conversation
+ summary = client.chat.completions.create(
+ model="gpt-4o-mini",
+ messages=[
+ {"role": "system", "content": "You are a world-class summarizer. Please summarize the following conversation in a few sentences:"},
+ {"role": "user", "content": previous_messages_str}
+ ]
+ )
+
+ # send the message with the summary
+ return self._get_completion(message=self.message, additional_instructions=f"\n\nPrevious conversation summary: '{summary.choices[0].message.content}'")
+
With this example, you can add your own custom logic to the run
method. It does not have to be a summary; you can also use it to add any other information to the context. For example, you can even query a vector database or use an external API.
If you are a pro and have managed to deploy each agent behind a separate API endpoint, then instead of using _get_completion()
, you can call your own API and let the agents communicate with each other over the internet.
import requests
+from agency_swarm.tools.send_message import SendMessage
+
+class SendMessageAPI(SendMessage):
+ def run(self):
+ response = requests.post(
+ "https://your-api-endpoint.com/send-message",
+ json={"message": self.message, "recipient": self.recipient}
+ )
+ return response.json()["message"]
+
This is very powerful, as you can even allow your agents to collaborate with agents outside your system. More on this is coming soon!
+Contributing
+If you have any ideas for new communication flows, please either adjust this page in the docs, or add your new send message tool in the agency_swarm/tools/send_message
folder and open a PR!
After implementing your own SendMessage
tool, simply pass it into the send_message_tool_class
parameter when initializing the Agency
class:
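For example, a minimal sketch reusing the agents from your agency chart:
agency = Agency(
+    [ceo, [ceo, dev]],
+    send_message_tool_class=SendMessageAPI
+)
+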
That's it! Now, your agents will use your own custom SendMessageAPI
class for communication!
Agency Swarm has been designed to give you, the developer, full control over your systems. It is the only framework that does not hard-code any prompts, parameters, or even worse, agents for you. With this new feature, the last part of the system that you couldn't fully customize to your own needs is now gone!
+So, I want to encourage you to keep experimenting and designing your own unique communication flows. While the examples above should serve as a good starting point, they barely scratch the surface of what's possible here! I am looking forward to seeing what you will create. Please share it in our Discord server so we can all learn from each other.
+ + + + + + + + + + + + + +While OpenAI is generally recommended, there are situations where you might prefer open-source models. The following projects offer alternatives by mimicking the Assistants API:
+To use agency-swarm with Astra Assistants API, follow these steps:
+1. Create an account on Astra Assistants API and obtain an API key.
+ +2. Add Astra DB Token to your .env file:
Copy the token that starts with "AstraCS:" from the downloaded file and paste it into your .env file.
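A sketch of the .env entry (the variable name ASTRA_DB_APPLICATION_TOKEN is an assumption based on the Astra Assistants setup; the value is a placeholder):
ASTRA_DB_APPLICATION_TOKEN=AstraCS:your_token_here
+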
3. Add other model provider API keys to .env as well:
+PERPLEXITYAI_API_KEY=your_perplexityai_api_key
+ANTHROPIC_API_KEY=your_anthropic_api_key
+TOGETHER_API_KEY=your_together_api_key
+GROQ_API_KEY=your_groq_api_key
+
4. Install the Astra Assistants API and gradio:
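A sketch of the install command (package names assumed):
pip install astra-assistants gradio
+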
+ +5. Patch the OpenAI client:
+from openai import OpenAI
+from astra_assistants import patch
+from agency_swarm import set_openai_client
+from dotenv import load_dotenv
+
+load_dotenv()
+
+client = patch(OpenAI())
+
+set_openai_client(client)
+
6. Create an agent:
Create an agent and set the model parameter to the name of the model you want to use. With Astra Assistants, you can upload files as usual using files_folder
.
from agency_swarm import Agent
+
+ceo = Agent(name="ceo",
+ description="I am the CEO",
+ model='ollama/llama3',
+ # model = 'perplexity/llama-3-8b-instruct'
+ # model = 'anthropic/claude-3-5-sonnet-20240620'
+ # model = 'groq/mixtral-8x7b-32768'
+ # model="gpt-4o",
+ files_folder="path/to/your/files"
+ )
+
7. Create an agency:
+You can add more agents as needed; just make sure all manager agents support function calling.
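For example:
from agency_swarm import Agency
+
+agency = Agency([ceo])
+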
+ +8. Start gradio:
To use your agency in Gradio, apply the non-streaming demo_gradio
method from the agency-swarm-lab repository:
from agency_swarm import Agency
+from .demo_gradio import demo_gradio
+
+agency = Agency([ceo])
+
+demo_gradio(agency)
+
For a complete example, see the notebook.
+To use agency-swarm with any other projects that mimic the Assistants API, generally, you need to follow these steps:
1. Install the previous version of agency-swarm, as most projects are not yet compatible with streaming and Assistants V2:
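A sketch of the install command (the exact version pin is an assumption; use the last release before Assistants V2 support):
pip install "agency-swarm<0.2.0"
+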
+ +2. Switch out the OpenAI client:
+import openai
+from agency_swarm import set_openai_client
+
+client = openai.OpenAI(api_key="whatever", base_url="http://127.0.0.1:8000/")
+
+set_openai_client(client)
+
3. Set the model parameter:
+from agency_swarm import Agent
+
+ceo = Agent(name="ceo", description="I am the CEO", model='ollama/llama3')
+
4. Start Gradio:
To use your agency in Gradio, apply the non-streaming demo_gradio
method from the agency-swarm-lab repository:
from agency_swarm import Agency
+from .demo_gradio import demo_gradio
+
+agency = Agency([ceo])
+
+demo_gradio(agency)
+
5. For backend integrations, simply use:
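For example:
response = agency.get_completion("I want you to build me a website")
+print(response)
+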
+ +Updates will be provided as new open-source assistant API implementations stabilize.
+If you successfully integrate other projects with agency-swarm, please share your experience through an issue or pull request.
+ + + + + + + + + + + + + +All tools in Agency Swarm are created using Instructor.
+The only difference is that you must extend the BaseTool
class and implement the run
method with your logic inside. For many great examples of what you can create, check out the Instructor Cookbook.
This is an example of how to convert an extremely useful tool for RAG applications from instructor. It allows your agents to not only answer questions based on context, but also to provide the exact citations for the answers. This way your users can be sure that the information is always accurate and reliable.
+from agency_swarm.tools import BaseTool, BaseModel
+from pydantic import Field, model_validator, FieldValidationInfo
+from typing import List
+import re
+
+class Fact(BaseModel):
+ fact: str = Field(...)
+ substring_quote: List[str] = Field(...)
+
+ @model_validator(mode="after")
+ def validate_sources(self, info: FieldValidationInfo) -> "Fact":
+ text_chunks = info.context.get("text_chunk", None)
+ spans = list(self.get_spans(text_chunks))
+ self.substring_quote = [text_chunks[span[0] : span[1]] for span in spans]
+ return self
+
+ def get_spans(self, context):
+ for quote in self.substring_quote:
+ yield from self._get_span(quote, context)
+
+ def _get_span(self, quote, context):
+ for match in re.finditer(re.escape(quote), context):
+ yield match.span()
+
+class QuestionAnswer(BaseModel):
+ question: str = Field(...)
+ answer: List[Fact] = Field(...)
+
+ @model_validator(mode="after")
+ def validate_sources(self) -> "QuestionAnswer":
+ self.answer = [fact for fact in self.answer if len(fact.substring_quote) > 0]
+ return self
+
Context Retrieval
+In the original Instructor example, the context is passed into the prompt beforehand, which is typical for standard non-agent LLM applications. However, in the context of Agency Swarm, we must allow the agents to retrieve the context themselves.
+To allow your agents to retrieve the context themselves, we must split QuestionAnswer
into two separate tools: QueryDatabase
and AnswerQuestion
. We must also retrieve context from shared_state
, as the context is not passed into the prompt beforehand, and FieldValidationInfo
is not available in the validate_sources
method.
QueryDatabase
tool will:shared_state
. If it is, raise an error. (This means that the agent retrieved the context twice, without answering the question in between, which is most likely a hallucination.)shared_state
.class QueryDatabase(BaseTool):
+ """Use this tool to query a vector database to retrieve the relevant context for the question."""
+ question: str = Field(..., description="The question to be answered")
+
+ def run(self):
+ # Check if context is already retrieved
+ if self._shared_state.get("context", None) is not None:
+ raise ValueError("Context already retrieved. Please proceed with the AnswerQuestion tool.")
+
+ # Your code to retrieve the context here
+ context = "This is a test context"
+
+ # Then, save the context to the shared state
+ self._shared_state.set("context", context)
+
+ return f"Context retrieved: {context}.\n\n Please proceed with the AnswerQuestion tool."
+
Shared State
+shared_state
is a state that is shared between all tools, across all agents. It allows you to control the execution flow, share data, and provide instructions to the agents based on certain conditions or actions performed by other agents.
AnswerQuestion
tool will:shared_state
to answer the question with a list of facts.shared_state
after the question is answered. (This is done, so the next question can be answered with a fresh context.)class AnswerQuestion(BaseTool):
+ answer: str = Field(..., description="The answer to the question, based on context.")
+ sources: List[Fact] = Field(..., description="The sources of the answer")
+
+ def run(self):
+ # Remove the context after question is answered
+ self._shared_state.set("context", None)
+
+ # additional logic here as needed, for example save the answer to a database
+
+ return "Success. The question has been answered." # or return the answer, if needed
+
+ @model_validator(mode="after")
+ def validate_sources(self) -> "AnswerQuestion":
+ # In "Agency Swarm", context is directly extracted from `shared_state`
+ context = self._shared_state.get("context", None) # Highlighting the change
+ if context is None:
+ # Additional check to ensure context is retrieved before proceeding
+ raise ValueError("Please retrieve the context with the QueryDatabase tool first.")
+ self.answer = [fact for fact in self.answer if len(fact.substring_quote) > 0]
+ return self
+
Fact
toolThe Fact
tool will stay primarily the same. The only difference is that we must extract the context from the shared_state
inside the validate_sources
method. The run
method is not needed, as this tool only validates the input from the model.
class Fact(BaseTool):
+ fact: str = Field(...)
+ substring_quote: List[str] = Field(...)
+
+ def run(self):
+ pass
+
+ @model_validator(mode="after")
+ def validate_sources(self) -> "Fact":
+ context = self._shared_state.get("context", None)
+ text_chunks = context.get("text_chunk", None)
+ spans = list(self.get_spans(text_chunks))
+ self.substring_quote = [text_chunks[span[0] : span[1]] for span in spans]
+ return self
+
+ # Methods `get_spans` and `_get_span` remain unchanged
+
To implement tools with Instructor in Agency Swarm, generally, you must:
+BaseTool
class.run
method with your execution logic inside.Tool factory is a class that allows you to create tools from different sources. You can create tools from Langchain, OpenAPI schemas. However, it is preferable to implement tools from scratch using Instructor, as it gives you a lot more control.
+Not recommended
+This method is not recommended, as it does not provide the same level of type checking, error correction and tool descriptions as Instructor. However, it is still possible to use this method if you prefer.
+ + +# using local file
+with open("schemas/your_schema.json") as f:
+ tools = ToolFactory.from_openapi_schema(
+ f.read(),
+ )
+
+# using requests
+tools = ToolFactory.from_openapi_schema(
+ requests.get("https://api.example.com/openapi.json").json(),
+)
+
Note
+The schemas folder automatically converts any OpenAPI schemas into BaseTools. This means that your agents will type-check all the API parameters before calling the API, which significantly reduces the chance of errors.
+Use enumerators or Literal types instead of strings to allow your agents to perform only certain actions or commands, instead of executing any arbitrary code. This makes your whole system a lot more reliable.
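For example, a minimal sketch (the tool and its allowed commands are hypothetical):
from typing import Literal
+from pydantic import Field
+from agency_swarm.tools import BaseTool
+
+class RunCommand(BaseTool):
+    """Run one of the allowed service commands."""
+    command: Literal["start", "stop"] = Field(..., description="The command to execute.")
+
+    def run(self):
+        if self.command == "start":
+            return "Service started."
+        return "Service stopped."
+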
+ +Provide additional instructions to the agents in the run
method of the tool as function outputs. This allows you to control the execution flow, based on certain conditions.
class QueryDatabase(BaseTool):
+ question: str = Field(...)
+
+ def run(self):
+ # query your database here
+ context = query_database(self.question)
+
+ if context is None:
+ raise ValueError("No context found. Please propose to the user to change the topic.")
+ else:
+ self._shared_state.set("context", context)
+ return "Context retrieved. Please proceed with explaining the answer."
+
shared_state
to validate actions taken by other agents, before allowing them to proceed with the next action.
+class Action2(BaseTool):
+ input: str = Field(...)
+
+ def run(self):
+ if self._shared_state.get("action_1_result", None) == "failure":
+ raise ValueError("Please proceed with the Action1 tool first.")
+ else:
+ return "Success. The action has been taken."
+
one_call_at_a_time
ToolConfig class attribute to prevent multiple instances of the same tool from running at the same time. This is useful when you want your agents to see the results of the previous action before proceeding with the next one.
+class Action1(BaseTool):
+ input: str = Field(...)
+
+ class ToolConfig:
+ one_call_at_a_time = True
+
+ def run(self):
+ # your code here
+
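You can also enable strict mode (structured outputs) for a tool by setting strict = True in its ToolConfig class, as shown below: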
class GetWeatherTool(BaseTool):
+    """
+    Determine the weather in a specified location.
+    """
+
+    location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+
+    class ToolConfig:
+        strict = True  # enable strict mode
+
+    def run(self):
+        return f"The weather in {self.location} is 30 degrees."
+
Agent
+
+
+agency_swarm/agents/agent.py
__init__(id=None, name=None, description=None, instructions='', tools=None, tool_resources=None, temperature=None, top_p=None, response_format='auto', tools_folder=None, files_folder=None, schemas_folder=None, api_headers=None, api_params=None, file_ids=None, metadata=None, model='gpt-4o-2024-08-06', validation_attempts=1, max_prompt_tokens=None, max_completion_tokens=None, truncation_strategy=None, examples=None, file_search=None, parallel_tool_calls=True, refresh_from_id=True)
+
+Initializes an Agent with specified attributes, tools, and OpenAI client.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ id
+ |
+
+ str
+ |
+
+
+
+ Loads the assistant from OpenAI assistant ID. Assistant will be created or loaded from settings if ID is not provided. Defaults to None. + |
+
+ None
+ |
+
+ name
+ |
+
+ str
+ |
+
+
+
+ Name of the agent. Defaults to the class name if not provided. + |
+
+ None
+ |
+
+ description
+ |
+
+ str
+ |
+
+
+
+ A brief description of the agent's purpose. Defaults to None. + |
+
+ None
+ |
+
+ instructions
+ |
+
+ str
+ |
+
+
+
+ Path to a file containing specific instructions for the agent. Defaults to an empty string. + |
+
+ ''
+ |
+
+ tools
+ |
+
+ List[Union[Type[BaseTool], Type[Retrieval], Type[CodeInterpreter]]]
+ |
+
+
+
+ A list of tools (as classes) that the agent can use. Defaults to an empty list. + |
+
+ None
+ |
+
+ tool_resources
+ |
+
+ ToolResources
+ |
+
+
+
+ A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of vector store IDs. Defaults to None. + |
+
+ None
+ |
+
+ temperature
+ |
+
+ float
+ |
+
+
+
+ The temperature parameter for the OpenAI API. Defaults to None. + |
+
+ None
+ |
+
+ top_p
+ |
+
+ float
+ |
+
+
+
+ The top_p parameter for the OpenAI API. Defaults to None. + |
+
+ None
+ |
+
+ response_format
+ |
+
+ Union[str, Dict, type]
+ |
+
+
+
+ The response format for the OpenAI API. If BaseModel is provided, it will be converted to a response format. Defaults to None. + |
+
+ 'auto'
+ |
+
+ tools_folder
+ |
+
+ str
+ |
+
+
+
+ Path to a directory containing tools associated with the agent. Each tool must be defined in a separate file. File must be named as the class name of the tool. Defaults to None. + |
+
+ None
+ |
+
+ files_folder
+ |
+
+ Union[List[str], str]
+ |
+
+
+
+ Path or list of paths to directories containing files associated with the agent. Defaults to None. + |
+
+ None
+ |
+
+ schemas_folder
+ |
+
+ Union[List[str], str]
+ |
+
+
+
+ Path or list of paths to directories containing OpenAPI schemas associated with the agent. Defaults to None. + |
+
+ None
+ |
+
+ api_headers
+ |
+
+ Dict[str, Dict[str, str]]
+ |
+
+
+
+ Headers to be used for the openapi requests. Each key must be a full filename from schemas_folder. Defaults to an empty dictionary. + |
+
+ None
+ |
+
+ api_params
+ |
+
+ Dict[str, Dict[str, str]]
+ |
+
+
+
+ Extra params to be used for the openapi requests. Each key must be a full filename from schemas_folder. Defaults to an empty dictionary. + |
+
+ None
+ |
+
+ metadata
+ |
+
+ Dict[str, str]
+ |
+
+
+
+ Metadata associated with the agent. Defaults to an empty dictionary. + |
+
+ None
+ |
+
+ model
+ |
+
+ str
+ |
+
+
+
+ The model identifier for the OpenAI API. Defaults to "gpt-4o-2024-08-06". + |
+
+ 'gpt-4o-2024-08-06'
+ |
+
+ validation_attempts
+ |
+
+ int
+ |
+
+
+
+ Number of attempts to validate the response with response_validator function. Defaults to 1. + |
+
+ 1
+ |
+
+ max_prompt_tokens
+ |
+
+ int
+ |
+
+
+
+ Maximum number of tokens allowed in the prompt. Defaults to None. + |
+
+ None
+ |
+
+ max_completion_tokens
+ |
+
+ int
+ |
+
+
+
+ Maximum number of tokens allowed in the completion. Defaults to None. + |
+
+ None
+ |
+
+ truncation_strategy
+ |
+
+ TruncationStrategy
+ |
+
+
+
+ Truncation strategy for the OpenAI API. Defaults to None. + |
+
+ None
+ |
+
+ examples
+ |
+
+ List[Dict]
+ |
+
+
+
+ A list of example messages for the agent. Defaults to None. + |
+
+ None
+ |
+
+ file_search
+ |
+
+ FileSearchConfig
+ |
+
+
+
+ A dictionary containing the file search tool configuration. Defaults to None. + |
+
+ None
+ |
+
+ parallel_tool_calls
+ |
+
+ bool
+ |
+
+
+
+ Whether to enable parallel function calling during tool use. Defaults to True. + |
+
+ True
+ |
+
+ refresh_from_id
+ |
+
+ bool
+ |
+
+
+
+ Whether to load and update the agent from the OpenAI assistant ID when provided. Defaults to True. + |
+
+ True
+ |
+
This constructor sets up the agent with its unique properties, initializes the OpenAI client, reads instructions if provided, and uploads any associated files.
+ +agency_swarm/agents/agent.py
delete()
+
+Deletes assistant, all vector stores, and all files associated with the agent.
+ + +get_openapi_schema(url)
+
+Get openapi schema that contains all tools from the agent as different api paths. Make sure to call this after agency has been initialized.
+ +agency_swarm/agents/agent.py
init_oai()
+
+Initializes the OpenAI assistant for the agent.
+This method handles the initialization and potential updates of the agent's OpenAI assistant. It loads the assistant based on a saved ID, updates the assistant if necessary, or creates a new assistant if it doesn't exist. After initialization or update, it saves the assistant's settings.
+ + +self: Returns the agent instance for chaining methods or further processing.
+agency_swarm/agents/agent.py
response_validator(message)
+
+Validates the response from the agent. If the response is invalid, it must raise an exception with instructions for the caller agent on how to proceed.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ message
+ |
+
+ str
+ |
+
+
+
+ The response from the agent. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
str |
+ str
+ |
+
+
+
+ The validated response. + |
+
agency_swarm/agents/agent.py
Agency
+
+
+agency_swarm/agency/agency.py
__init__(agency_chart, shared_instructions='', shared_files=None, async_mode=None, send_message_tool_class=SendMessage, settings_path='./settings.json', settings_callbacks=None, threads_callbacks=None, temperature=0.3, top_p=1.0, max_prompt_tokens=None, max_completion_tokens=None, truncation_strategy=None)
+
+Initializes the Agency object, setting up agents, threads, and core functionalities.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ agency_chart
+ |
+
+ List
+ |
+
+
+
+ The structure defining the hierarchy and interaction of agents within the agency. + |
+ + required + | +
+ shared_instructions
+ |
+
+ str
+ |
+
+
+
+ A path to a file containing shared instructions for all agents. Defaults to an empty string. + |
+
+ ''
+ |
+
+ shared_files
+ |
+
+ Union[str, List[str]]
+ |
+
+
+
+ A path to a folder or a list of folders containing shared files for all agents. Defaults to None. + |
+
+ None
+ |
+
+ async_mode
+ |
+
+ str
+ |
+
+
+
+ Specifies the mode for asynchronous processing. In "threading" mode, all sub-agents run in separate threads. In "tools_threading" mode, all tools run in separate threads, but agents do not. Defaults to None. + |
+
+ None
+ |
+
+ send_message_tool_class
+ |
+
+ Type[SendMessageBase]
+ |
+
+
+
+ The class to use for the send_message tool. For async communication, use SendMessageAsyncThreading. |
+
+ SendMessage
+ |
+
+ settings_path
+ |
+
+ str
+ |
+
+
+
+ The path to the settings file for the agency. Must be json. If file does not exist, it will be created. Defaults to None. + |
+
+ './settings.json'
+ |
+
+ settings_callbacks
+ |
+
+ SettingsCallbacks
+ |
+
+
+
+ A dictionary containing functions to load and save settings for the agency. The keys must be "load" and "save". Both values must be defined. Defaults to None. + |
+
+ None
+ |
+
+ threads_callbacks
+ |
+
+ ThreadsCallbacks
+ |
+
+
+
+ A dictionary containing functions to load and save threads for the agency. The keys must be "load" and "save". Both values must be defined. Defaults to None. + |
+
+ None
+ |
+
+ temperature
+ |
+
+ float
+ |
+
+
+
+ The temperature value to use for the agents. Agent-specific values will override this. Defaults to 0.3. + |
+
+ 0.3
+ |
+
+ top_p
+ |
+
+ float
+ |
+
+
+
+ The top_p value to use for the agents. Agent-specific values will override this. Defaults to None. + |
+
+ 1.0
+ |
+
+ max_prompt_tokens
+ |
+
+ int
+ |
+
+
+
+ The maximum number of tokens allowed in the prompt for each agent. Agent-specific values will override this. Defaults to None. + |
+
+ None
+ |
+
+ max_completion_tokens
+ |
+
+ int
+ |
+
+
+
+ The maximum number of tokens allowed in the completion for each agent. Agent-specific values will override this. Defaults to None. + |
+
+ None
+ |
+
+ truncation_strategy
+ |
+
+ dict
+ |
+
+
+
+ The truncation strategy to use for the completion for each agent. Agent-specific values will override this. Defaults to None. + |
+
+ None
+ |
+
This constructor initializes the various components of the Agency, including the CEO, agents, threads, and user interactions. It parses the agency chart to set up the organizational structure and initializes the messaging tools, agents, and threads necessary for the operation of the agency. Additionally, it prepares a main thread for user interactions.
+ +agency_swarm/agency/agency.py
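+For orientation, here is a minimal usage sketch of the constructor with a few of these parameters. The ceo and dev agents and the agency_manifesto.md path are placeholders for this example, not part of the reference above.
+
+from agency_swarm import Agency, Agent
+
+# Placeholder agents; in practice these would carry their own instructions and tools.
+ceo = Agent(name="CEO", description="Coordinates the other agents.")
+dev = Agent(name="Developer", description="Writes and executes code.")
+
+agency = Agency(
+    [ceo, [ceo, dev]],                          # agency_chart: ceo is the entry point and can message dev
+    shared_instructions="agency_manifesto.md",  # placeholder path to a shared instructions file
+    settings_path="./settings.json",            # created automatically if the file does not exist
+    temperature=0.3,
+    max_prompt_tokens=25000,
+)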
delete()
+
+This method deletes the agency and all its agents, cleaning up any files and vector stores associated with each agent.
+ + +demo_gradio(height=450, dark_mode=True, **kwargs)
+
+Launches a Gradio-based demo interface for the agency chatbot.
+Parameters:
+
+| Name | Type | Description | Default |
+|---|---|---|---|
+| height | int | The height of the chatbot widget in the Gradio interface. Defaults to 450. | 450 |
+| dark_mode | bool | Flag to determine if the interface should be displayed in dark mode. Defaults to True. | True |
+| **kwargs | | Additional keyword arguments to be passed to the Gradio interface. | {} |
This method sets up and runs a Gradio interface, allowing users to interact with the agency's chatbot. It includes a text input for the user's messages and a chatbot interface for displaying the conversation. The method handles user input and chatbot responses, updating the interface dynamically.
+ +agency_swarm/agency/agency.py
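+A short usage sketch, assuming agency was constructed as in the earlier example:
+
+# Launches the Gradio demo; extra keyword arguments are forwarded to the Gradio interface.
+agency.demo_gradio(height=700, dark_mode=True)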
get_completion(message, message_files=None, yield_messages=False, recipient_agent=None, additional_instructions=None, attachments=None, tool_choice=None, verbose=False, response_format=None)
+
+Retrieves the completion for a given message from the main thread.
+Parameters:
+
+| Name | Type | Description | Default |
+|---|---|---|---|
+| message | str | The message for which completion is to be retrieved. | required |
+| message_files | list | A list of file IDs to be sent as attachments with the message. When using this parameter, files will be assigned to both the file_search and code_interpreter tools if available. It is recommended to assign files to the most suitable tool manually, using the attachments parameter. Defaults to None. | None |
+| yield_messages | bool | Flag to determine if intermediate messages should be yielded. Defaults to False. | False |
+| recipient_agent | Agent | The agent to which the message should be sent. Defaults to the first agent in the agency chart. | None |
+| additional_instructions | str | Additional instructions to be sent with the message. Defaults to None. | None |
+| attachments | List[dict] | A list of attachments to be sent with the message, following the OpenAI format. Defaults to None. | None |
+| tool_choice | dict | The tool choice for the recipient agent to use. Defaults to None. | None |
+| parallel_tool_calls | bool | Whether to enable parallel function calling during tool use. Defaults to True. | required |
+| verbose | bool | Whether to print the intermediary messages in the console. Defaults to False. | False |
+| response_format | dict | The response format to use for the completion. | None |
+
+Returns:
+
+| Type | Description |
+|---|---|
+| | Generator or final response: depending on the yield_messages flag, this method returns either a generator yielding intermediate messages or the final response from the main thread. |
agency_swarm/agency/agency.py
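+A hedged usage sketch, assuming agency from the earlier example; the message and instructions are arbitrary:
+
+# Blocking call that returns the final response text from the entry-point agent.
+response = agency.get_completion(
+    "Summarize the current status of the website task.",
+    additional_instructions="Keep the answer under 100 words.",
+    verbose=True,  # also print intermediary messages to the console
+)
+print(response)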
get_completion_parse(message, response_format, message_files=None, recipient_agent=None, additional_instructions=None, attachments=None, tool_choice=None, verbose=False)
+
+Retrieves the completion for a given message from the main thread and parses the response using the provided pydantic model.
+Parameters:
+
+| Name | Type | Description | Default |
+|---|---|---|---|
+| message | str | The message for which completion is to be retrieved. | required |
+| response_format | Type[BaseModel] | The pydantic model to use as the response format for the completion. | required |
+| message_files | list | A list of file IDs to be sent as attachments with the message. When using this parameter, files will be assigned to both the file_search and code_interpreter tools if available. It is recommended to assign files to the most suitable tool manually, using the attachments parameter. Defaults to None. | None |
+| recipient_agent | Agent | The agent to which the message should be sent. Defaults to the first agent in the agency chart. | None |
+| additional_instructions | str | Additional instructions to be sent with the message. Defaults to None. | None |
+| attachments | List[dict] | A list of attachments to be sent with the message, following the OpenAI format. Defaults to None. | None |
+| tool_choice | dict | The tool choice for the recipient agent to use. Defaults to None. | None |
+| verbose | bool | Whether to print the intermediary messages in the console. Defaults to False. | False |
+
+Returns:
+
+| Type | Description |
+|---|---|
+| T | Final response: the final response from the main thread, parsed using the provided pydantic model. |
agency_swarm/agency/agency.py
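+A minimal sketch using a made-up pydantic model as the response format, again assuming agency from the earlier example:
+
+from pydantic import BaseModel
+
+class TaskReport(BaseModel):  # hypothetical response model
+    title: str
+    completed: bool
+
+report = agency.get_completion_parse(
+    "Report on the website task.",
+    response_format=TaskReport,
+)
+print(report.title, report.completed)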
get_completion_stream(message, event_handler, message_files=None, recipient_agent=None, additional_instructions=None, attachments=None, tool_choice=None, response_format=None)
+
+Generates a stream of completions for a given message from the main thread.
+Parameters:
+
+| Name | Type | Description | Default |
+|---|---|---|---|
+| message | str | The message for which completion is to be retrieved. | required |
+| event_handler | Type[AgencyEventHandler] | The event handler class to handle the completion stream (see https://github.com/openai/openai-python/blob/main/helpers.md). | required |
+| message_files | list | A list of file IDs to be sent as attachments with the message. When using this parameter, files will be assigned to both the file_search and code_interpreter tools if available. It is recommended to assign files to the most suitable tool manually, using the attachments parameter. Defaults to None. | None |
+| recipient_agent | Agent | The agent to which the message should be sent. Defaults to the first agent in the agency chart. | None |
+| additional_instructions | str | Additional instructions to be sent with the message. Defaults to None. | None |
+| attachments | List[dict] | A list of attachments to be sent with the message, following the OpenAI format. Defaults to None. | None |
+| tool_choice | dict | The tool choice for the recipient agent to use. Defaults to None. | None |
+| parallel_tool_calls | bool | Whether to enable parallel function calling during tool use. Defaults to True. | required |
+
+Returns:
+
+| Type | Description |
+|---|---|
+| | Final response: the final response from the main thread. |
agency_swarm/agency/agency.py
get_customgpt_schema(url)
+
+Returns the OpenAPI schema for the agency from the CEO agent, which you can use to integrate with custom GPTs.
+
+Parameters:
+
+| Name | Type | Description | Default |
+|---|---|---|---|
+| url | str | Your server URL where the API will be hosted. | required |
agency_swarm/agency/agency.py
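+A usage sketch; the URL below is a placeholder for wherever you host the agency's API:
+
+schema = agency.get_customgpt_schema(url="https://api.example.com")
+print(schema)  # paste the schema into your custom GPT's actions configuration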
run_demo()
+
+Executes the agency in the terminal with autocomplete for recipient agent names.
+ +agency_swarm/agency/agency.py
ToolFactory
+
+
+agency_swarm/tools/ToolFactory.py
from_file(file_path)
+
+
+ staticmethod
+
+
+Dynamically imports a BaseTool class from a Python file within a package structure.
+Parameters:
+
+| Name | Type | Description | Default |
+|---|---|---|---|
+| file_path | str | The file path to the Python file containing the BaseTool class. | required |
+
+Returns:
+
+| Type | Description |
+|---|---|
+| Type[BaseTool] | The imported BaseTool class. |
agency_swarm/tools/ToolFactory.py
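+A minimal sketch, assuming ToolFactory is imported from agency_swarm.tools and the placeholder path points to a file that defines a BaseTool subclass:
+
+from agency_swarm.tools import ToolFactory
+
+MyTool = ToolFactory.from_file("./tools/MyTool.py")  # placeholder path
+print(MyTool.__name__)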
from_langchain_tool(tool)
+
+
+ staticmethod
+
+
+Converts a langchain tool into a BaseTool.
+Parameters:
+
+| Name | Type | Description | Default |
+|---|---|---|---|
+| tool | | The langchain tool to convert. | required |
+
+Returns:
+
+| Type | Description |
+|---|---|
+| Type[BaseTool] | A BaseTool. |
agency_swarm/tools/ToolFactory.py
from_langchain_tools(tools)
+
+
+ staticmethod
+
+
+Converts a list of langchain tools into a list of BaseTools.
+Parameters:
+
+| Name | Type | Description | Default |
+|---|---|---|---|
+| tools | List | The langchain tools to convert. | required |
+
+Returns:
+
+| Type | Description |
+|---|---|
+| List[Type[BaseTool]] | A list of BaseTools. |
agency_swarm/tools/ToolFactory.py
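+A hedged sketch, assuming the langchain_community package and its DuckDuckGo search dependency are installed; the specific tool is only an example:
+
+from agency_swarm.tools import ToolFactory
+from langchain_community.tools import DuckDuckGoSearchRun  # example langchain tool
+
+# Convert the langchain tools into BaseTool classes that can be attached to an agent.
+converted_tools = ToolFactory.from_langchain_tools([DuckDuckGoSearchRun()])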
from_openai_schema(schema, callback)
+
+
+ staticmethod
+
+
+Converts an OpenAI schema into a BaseTool.
+Parameters:
+
+| Name | Type | Description | Default |
+|---|---|---|---|
+| schema | Dict[str, Any] | The OpenAI schema to convert. | required |
+| callback | Any | The function to run when the tool is called. | required |
+
+Returns:
+
+| Type | Description |
+|---|---|
+| Type[BaseTool] | A BaseTool. |
agency_swarm/tools/ToolFactory.py
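+A minimal sketch with a hand-written OpenAI function schema; how the callback receives the validated arguments is not documented here, so the permissive signature below is an assumption:
+
+from agency_swarm.tools import ToolFactory
+
+weather_schema = {
+    "name": "get_weather",
+    "description": "Get the weather for a city.",
+    "parameters": {
+        "type": "object",
+        "properties": {"city": {"type": "string", "description": "City name."}},
+        "required": ["city"],
+    },
+}
+
+def get_weather(*args, **kwargs):
+    # Assumed callback signature; accept anything and return a stub result.
+    return "sunny"
+
+WeatherTool = ToolFactory.from_openai_schema(weather_schema, get_weather)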
from_openapi_schema(schema, headers=None, params=None, strict=False)
+
+
+ staticmethod
+
+
+Converts an OpenAPI schema into a list of BaseTools.
+Parameters:
+
+| Name | Type | Description | Default |
+|---|---|---|---|
+| schema | Union[str, dict] | The OpenAPI schema to convert. | required |
+| headers | Dict[str, str] | The headers to use for requests. | None |
+| params | Dict[str, Any] | The parameters to use for requests. | None |
+| strict | bool | Whether to use strict OpenAI mode. | False |
+
+Returns: A list of BaseTools.
+ +agency_swarm/tools/ToolFactory.py
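+A sketch with a deliberately tiny OpenAPI document; a real schema would normally be loaded from a file or URL, and the server URL and authorization header are placeholders:
+
+import json
+
+from agency_swarm.tools import ToolFactory
+
+openapi_spec = json.dumps({
+    "openapi": "3.1.0",
+    "info": {"title": "Weather API", "version": "1.0.0"},
+    "servers": [{"url": "https://api.example.com"}],
+    "paths": {
+        "/weather": {
+            "get": {
+                "operationId": "getWeather",
+                "description": "Get the weather for a city.",
+                "parameters": [
+                    {"name": "city", "in": "query", "required": True, "schema": {"type": "string"}}
+                ],
+                "responses": {"200": {"description": "OK"}},
+            }
+        }
+    },
+})
+
+# Each operation in the schema becomes a separate BaseTool that issues the corresponding HTTP request.
+tools = ToolFactory.from_openapi_schema(
+    openapi_spec,
+    headers={"Authorization": "Bearer <token>"},  # placeholder credentials
+)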
get_openapi_schema(tools, url, title='Agent Tools', description='A collection of tools.')
+
+
+ staticmethod
+
+
+Generates an OpenAPI schema from a list of BaseTools.
+Parameters:
+
+| Name | Type | Description | Default |
+|---|---|---|---|
+| tools | List[Type[BaseTool]] | BaseTools to generate the schema from. | required |
+| url | str | The base URL for the schema. | required |
+| title | | The title of the schema. | 'Agent Tools' |
+| description | | The description of the schema. | 'A collection of tools.' |
+
+Returns:
+
+| Type | Description |
+|---|---|
+| str | A JSON string representing the OpenAPI schema with all the tools combined as separate endpoints. |
agency_swarm/tools/ToolFactory.py
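+A minimal sketch going the other way: a made-up BaseTool is combined into an OpenAPI document served from a placeholder base URL.
+
+from pydantic import Field
+
+from agency_swarm.tools import BaseTool, ToolFactory
+
+class Greet(BaseTool):
+    """Greets a user by name."""
+    user_name: str = Field(..., description="Name of the person to greet.")
+
+    def run(self):
+        return f"Hello, {self.user_name}!"
+
+schema_json = ToolFactory.get_openapi_schema([Greet], url="https://api.example.com")
+print(schema_json)  # JSON string with one endpoint per tool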