diff --git a/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_chunk_sizes.ipynb b/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_chunk_sizes.ipynb
index 1ee15282..6d9f3dba 100644
--- a/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_chunk_sizes.ipynb
+++ b/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_chunk_sizes.ipynb
@@ -118,17 +118,16 @@
"metadata": {},
"outputs": [],
"source": [
+ "from langchain.callbacks.manager import CallbackManager\n",
+ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
+ "from langchain.chat_models import ChatFireworks, ChatOpenAI\n",
"from langchain.document_loaders import PyPDFLoader\n",
- "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
- "from langchain.vectorstores import Chroma\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
- "from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
- "from langchain.chat_models import ChatFireworks\n",
- "from langchain.callbacks.manager import CallbackManager\n",
- "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
+ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+ "from langchain.vectorstores import Chroma\n",
"\n",
"\n",
"def load_and_split(file, token_count, split_document=True):\n",
@@ -262,8 +261,9 @@
"outputs": [],
"source": [
"import uuid\n",
- "from langsmith.client import Client\n",
+ "\n",
"from langchain.smith import RunEvalConfig\n",
+ "from langsmith.client import Client\n",
"\n",
"# Config\n",
"client = Client()\n",
diff --git a/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_long_context.ipynb b/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_long_context.ipynb
index 2e7f2aa8..924287c9 100644
--- a/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_long_context.ipynb
+++ b/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_long_context.ipynb
@@ -160,8 +160,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.chat_models import ChatAnthropic\n",
+ "from langchain.chat_models import ChatAnthropic, ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
@@ -221,8 +220,9 @@
"outputs": [],
"source": [
"import uuid\n",
- "from langsmith.client import Client\n",
+ "\n",
"from langchain.smith import RunEvalConfig\n",
+ "from langsmith.client import Client\n",
"\n",
"# Config\n",
"client = Client()\n",
@@ -281,8 +281,8 @@
}
],
"source": [
- "import numpy as np\n",
"import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
"\n",
"\n",
"def find_all_phrase_locations(phrases, text):\n",
diff --git a/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_multi_vector.ipynb b/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_multi_vector.ipynb
index 04640379..edcf3949 100644
--- a/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_multi_vector.ipynb
+++ b/docs/source/notebooks/retrieval/semi_structured_benchmarking/ss_eval_multi_vector.ipynb
@@ -380,8 +380,9 @@
"outputs": [],
"source": [
"import uuid\n",
- "from langsmith.client import Client\n",
+ "\n",
"from langchain.smith import RunEvalConfig\n",
+ "from langsmith.client import Client\n",
"\n",
"# Config\n",
"client = Client()\n",
diff --git a/docs/source/notebooks/tool_usage/benchmark_all_tasks.ipynb b/docs/source/notebooks/tool_usage/benchmark_all_tasks.ipynb
index a574bb2f..d2db828f 100644
--- a/docs/source/notebooks/tool_usage/benchmark_all_tasks.ipynb
+++ b/docs/source/notebooks/tool_usage/benchmark_all_tasks.ipynb
@@ -14,8 +14,8 @@
},
{
"cell_type": "code",
- "execution_count": null,
- "id": "ddadb9ef-e76a-4b48-85e4-f62c3957f502",
+ "execution_count": 1,
+ "id": "13a7483b-d08f-49fa-83da-619863171e5b",
"metadata": {
"tags": []
},
@@ -24,10 +24,114 @@
"import datetime\n",
"import uuid\n",
"\n",
+ "from langchain.globals import set_verbose\n",
"from langsmith.client import Client\n",
"\n",
- "from langchain_benchmarks import clone_public_dataset, registry\n",
- "from langchain_benchmarks.tool_usage import agents"
+ "from langchain_benchmarks import (\n",
+ " __version__,\n",
+ " clone_public_dataset,\n",
+ " model_registry,\n",
+ " registry,\n",
+ ")\n",
+ "from langchain_benchmarks.rate_limiting import RateLimiter\n",
+ "from langchain_benchmarks.tool_usage.agents import (\n",
+ " CustomAgentFactory,\n",
+ " OpenAIAgentFactory,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "50bbe23b-a3b1-4607-929d-ea6e88b7085e",
+ "metadata": {},
+ "source": [
+ "Prior to starting the tests, you may want to verify\n",
+    "that the task that you're working with and the models are properly defined."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "adfbcaa9-349c-4223-89be-4abff9cf76ff",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'input': \"Repeat the given string using the provided tools. Do not write anything else or provide any explanations. For example, if the string is 'abc', you must print the letters 'a', 'b', and 'c' one at a time and in that order. \\nWrite down your answer, but do not explain it. Input: `abc`\",\n",
+ " 'output': ' Thank you for the input and for confirming the output of each letter I printed. I simply followed the instructions to repeat the given string \"abc\" by printing one letter at a time using the provided \"type_letter\" tool without any additional explanations. Please let me know if you need me to repeat this process with a different input string.',\n",
+ " 'intermediate_steps': [(AgentActionMessageLog(tool='type_letter', tool_input={'letter': 'a'}, log=\"\\nInvoking type_letter: {'letter': 'a'}\\n\\t\", message_log=[AIMessage(content='{\\n \"tool_name\": \"type_letter\",\\n \"arguments\": {\\n \"letter\": \"a\"\\n }\\n}\\n')]),\n",
+ " 'OK'),\n",
+ " (AgentActionMessageLog(tool='type_letter', tool_input={'letter': 'b'}, log=\"\\nInvoking type_letter: {'letter': 'b'}\\n\\t\", message_log=[AIMessage(content='{\\n \"tool_name\": \"type_letter\",\\n \"arguments\": {\\n \"letter\": \"b\"\\n }\\n}\\n')]),\n",
+ " 'OK'),\n",
+ " (AgentActionMessageLog(tool='type_letter', tool_input={'letter': 'c'}, log=\"\\nInvoking type_letter: {'letter': 'c'}\\n\\t\", message_log=[AIMessage(content='{\\n \"tool_name\": \"type_letter\",\\n \"arguments\": {\\n \"letter\": \"c\"\\n }\\n}\\n')]),\n",
+ " 'OK')],\n",
+ " 'state': 'abc'}"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "task = registry[\"Tool Usage - Typewriter (1 tool)\"]\n",
+ "agent_factory = CustomAgentFactory(task, \"claude-2.1\")\n",
+ "\n",
+ "agent_factory().invoke({\"question\": \"abc\"})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "65b32e7d-3986-4461-8a3b-8e9b6d4008cb",
+ "metadata": {},
+ "source": [
+ "Define the test cases"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "26d390b6-9ade-424c-aabb-d450f52ed121",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "tests = [\n",
+ " # 2-tuple of (architecture, model name)\n",
+ " (\"xml\", \"mixtral-8x7b-instruct-fw\"),\n",
+ " (\"xml\", \"claude-2.1\"),\n",
+ " (\"xml\", \"claude-2\"),\n",
+ " (\"xml\", \"yi-34b-200k-fw\"),\n",
+ " (\"xml\", \"llama-v2-70b-chat-fw\"),\n",
+ " (\"xml\", \"llama-v2-13b-chat-fw\"),\n",
+ " (\"openai_functions\", \"gpt-3.5-turbo-1106\"),\n",
+ " (\"openai_functions\", \"gpt-3.5-turbo-0613\"),\n",
+    "    (\"openai_functions\", \"gpt-4-1106-preview\"), (\"openai_functions\", \"gpt-4-0613\"),\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b55b7c24-8b4d-4bd7-8b00-365fbe61897f",
+ "metadata": {},
+ "source": [
+ "## Run"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "a415dd82-2e70-4173-a3f3-8e1aac60db9e",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "experiment_uuid = uuid.uuid4().hex[:4]"
]
},
{
@@ -39,10 +143,9 @@
},
"outputs": [],
"source": [
- "experiment_uuid = uuid.uuid4().hex[:4]\n",
- "models = [\"gpt-3.5-turbo-16k\"]\n",
"client = Client() # Launch langsmith client for cloning datasets\n",
"today = datetime.date.today().isoformat()\n",
+ "rate_limiter = RateLimiter(requests_per_second=1)\n",
"\n",
"for task in registry:\n",
" dataset_name = task.name + f\"_benchmarking_{today}\"\n",
@@ -50,27 +153,41 @@
"\n",
" if task.type != \"ToolUsageTask\":\n",
" continue\n",
- " for model in models:\n",
+ "\n",
+ " for arch, model in tests:\n",
" print()\n",
- " print(f\"Benchmarking {task.name} with model: {model}\")\n",
+ " print(f\"Benchmarking {task.name} with model: {model} and arch: {arch}\")\n",
" eval_config = task.get_eval_config()\n",
- " agent_factory = agents.OpenAIAgentFactory(task, model=model)\n",
+ "\n",
+ " if arch == \"openai_functions\":\n",
+ " agent_factory = OpenAIAgentFactory(\n",
+ " task, model=model, rate_limiter=rate_limiter\n",
+ " )\n",
+ " elif arch == \"xml\":\n",
+ " agent_factory = CustomAgentFactory(\n",
+ " task, model=model, rate_limiter=rate_limiter\n",
+ " )\n",
+ " else:\n",
+ " raise ValueError()\n",
"\n",
" client.run_on_dataset(\n",
" dataset_name=dataset_name,\n",
" llm_or_chain_factory=agent_factory,\n",
" evaluation=eval_config,\n",
" verbose=False,\n",
- " project_name=f\"{dataset_name}-{model}-{experiment_uuid}\",\n",
+    "            project_name=f\"{model}-{experiment_uuid}\",\n",
" tags=[model],\n",
- " concurrency_level=1,\n",
+ " concurrency_level=5,\n",
" project_metadata={\n",
" \"model\": model,\n",
" \"id\": experiment_uuid,\n",
" \"task\": task.name,\n",
" \"date\": today,\n",
+ " \"langchain_benchmarks_version\": __version__,\n",
+ " \"arch\": arch,\n",
" },\n",
- " )"
+ " )\n",
+ " break"
]
}
],
diff --git a/langchain_benchmarks/model_registration.py b/langchain_benchmarks/model_registration.py
index d2a5a39b..06bba899 100644
--- a/langchain_benchmarks/model_registration.py
+++ b/langchain_benchmarks/model_registration.py
@@ -192,6 +192,15 @@
"model": "accounts/fireworks/models/llama-v2-70b-chat",
},
),
+ RegisteredModel(
+ provider="fireworks",
+ name="yi-34b-200k-fw",
+ type="llm",
+        description="34B LLM model from 01.ai, with context window 200k.",
+ params={
+ "model": "accounts/fireworks/models/yi-34b-200k",
+ },
+ ),
RegisteredModel(
provider="fireworks",
name="mixtral-8x7b-instruct-fw",
diff --git a/pyproject.toml b/pyproject.toml
index de62e59f..87a5d53d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-benchmarks"
-version = "0.0.8"
+version = "0.0.9"
description = "🦜💪 Flex those feathers!"
authors = ["LangChain AI"]
license = "MIT"