chore: Merge OSS (#512)
mattzh72 authored Jan 6, 2025
1 parent f388e8f commit c3cc35d
Showing 12 changed files with 38 additions and 15 deletions.
letta/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -1,4 +1,4 @@
-__version__ = "0.6.6"
+__version__ = "0.6.7"

# import clients
from letta.client.client import LocalClient, RESTClient, create_client
letta/agent.py (7 changes: 4 additions & 3 deletions)
@@ -224,8 +224,8 @@ def execute_tool_and_persist_state(self, function_name: str, function_args: dict
)
function_response, updated_agent_state = sandbox_run_result.func_return, sandbox_run_result.agent_state
assert orig_memory_str == self.agent_state.memory.compile(), "Memory should not be modified in a sandbox tool"

-self.update_memory_if_change(updated_agent_state.memory)
+if updated_agent_state is not None:
+    self.update_memory_if_change(updated_agent_state.memory)
except Exception as e:
# Need to catch error here, or else trunction wont happen
# TODO: modify to function execution error
@@ -238,7 +238,7 @@ def execute_tool_and_persist_state(self, function_name: str, function_args: dict
def _get_ai_reply(
self,
message_sequence: List[Message],
-function_call: str = "auto",
+function_call: Optional[str] = None,
first_message: bool = False,
stream: bool = False, # TODO move to config?
empty_response_retry_limit: int = 3,
@@ -1029,6 +1029,7 @@ def get_context_window(self) -> ContextWindowOverview:
num_archival_memory=agent_manager_passage_size,
num_recall_memory=message_manager_size,
num_tokens_external_memory_summary=num_tokens_external_memory_summary,
+external_memory_summary=external_memory_summary,
# top-level information
context_window_size_max=self.agent_state.llm_config.context_window,
context_window_size_current=num_tokens_used_total,
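
Context for the agent.py change above: the old code dereferenced updated_agent_state.memory unconditionally, which raises AttributeError whenever the sandboxed tool returns no agent state. A minimal sketch of the guarded pattern (the helper name apply_sandbox_result is hypothetical; update_memory_if_change is the method referenced in the diff):

from typing import Any, Optional

def apply_sandbox_result(agent: Any, func_return: Any, updated_agent_state: Optional[Any]) -> Any:
    # Before the fix, updated_agent_state.memory was read unconditionally,
    # raising AttributeError when the sandbox returned None (e.g. on tool failure).
    if updated_agent_state is not None:
        agent.update_memory_if_change(updated_agent_state.memory)
    return func_return
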
letta/cli/cli_config.py (1 change: 0 additions & 1 deletion)
@@ -60,7 +60,6 @@ def list(arg: Annotated[ListChoice, typer.Argument]):
table.field_names = ["Name", "Text"]
for human in client.list_humans():
table.add_row([human.template_name, human.value.replace("\n", "")[:100]])
-print(table)
elif arg == ListChoice.personas:
"""List all personas"""
table.field_names = ["Name", "Text"]
letta/llm_api/helpers.py (10 changes: 8 additions & 2 deletions)
@@ -250,6 +250,8 @@ def unpack_all_inner_thoughts_from_kwargs(

def unpack_inner_thoughts_from_kwargs(choice: Choice, inner_thoughts_key: str) -> Choice:
message = choice.message
+rewritten_choice = choice  # inner thoughts unpacked out of the function

if message.role == "assistant" and message.tool_calls and len(message.tool_calls) >= 1:
if len(message.tool_calls) > 1:
warnings.warn(f"Unpacking inner thoughts from more than one tool call ({len(message.tool_calls)}) is not supported")
@@ -271,14 +273,18 @@ def unpack_inner_thoughts_from_kwargs(choice: Choice, inner_thoughts_key: str) -
warnings.warn(f"Overwriting existing inner monologue ({new_choice.message.content}) with kwarg ({inner_thoughts})")
new_choice.message.content = inner_thoughts

-return new_choice
+# update the choice object
+rewritten_choice = new_choice
else:
warnings.warn(f"Did not find inner thoughts in tool call: {str(tool_call)}")
-return choice

except json.JSONDecodeError as e:
warnings.warn(f"Failed to strip inner thoughts from kwargs: {e}")
raise e
+else:
+    warnings.warn(f"Did not find tool call in message: {str(message)}")
+
+return rewritten_choice


def is_context_overflow_error(exception: Union[requests.exceptions.RequestException, Exception]) -> bool:
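
Context for the helpers.py change above: previously a message with no tool calls fell through without returning, so the function yielded None; tracking the result in rewritten_choice and returning it at the end gives every path a return value. The unpacking itself moves an inner-thoughts value out of a tool call's JSON arguments and into the message content. A standalone sketch of that idea using plain dicts instead of the library's Choice objects (the key name "inner_thoughts" is an assumed default):

import json

def unpack_inner_thoughts(message: dict, inner_thoughts_key: str = "inner_thoughts") -> dict:
    # Pop the inner-thoughts kwarg out of the first tool call's arguments
    # and surface it as the assistant message's content.
    tool_calls = message.get("tool_calls") or []
    if message.get("role") == "assistant" and tool_calls:
        args = json.loads(tool_calls[0]["function"]["arguments"])
        if inner_thoughts_key in args:
            message["content"] = args.pop(inner_thoughts_key)
            tool_calls[0]["function"]["arguments"] = json.dumps(args)
    return message

msg = {
    "role": "assistant",
    "content": None,
    "tool_calls": [{"function": {"name": "send_message",
                                 "arguments": '{"inner_thoughts": "pondering...", "message": "hi"}'}}],
}
print(unpack_inner_thoughts(msg)["content"])  # -> pondering...
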
letta/llm_api/llm_api_tools.py (11 changes: 10 additions & 1 deletion)
@@ -94,7 +94,7 @@ def create(
user_id: Optional[str] = None, # option UUID to associate request with
functions: Optional[list] = None,
functions_python: Optional[dict] = None,
-function_call: str = "auto",
+function_call: Optional[str] = None,  # see: https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice
# hint
first_message: bool = False,
force_tool_call: Optional[str] = None, # Force a specific tool to be called
@@ -132,10 +132,19 @@ def create(

# openai
if llm_config.model_endpoint_type == "openai":

if model_settings.openai_api_key is None and llm_config.model_endpoint == "https://api.openai.com/v1":
# only is a problem if we are *not* using an openai proxy
raise LettaConfigurationError(message="OpenAI key is missing from letta config file", missing_fields=["openai_api_key"])

+if function_call is None and functions is not None and len(functions) > 0:
+    # force function calling for reliability, see https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice
+    # TODO(matt) move into LLMConfig
+    if llm_config.model_endpoint == "https://inference.memgpt.ai":
+        function_call = "auto"  # TODO change to "required" once proxy supports it
+    else:
+        function_call = "required"

data = build_openai_chat_completions_request(llm_config, messages, user_id, functions, function_call, use_tool_naming, max_tokens)
if stream: # Client requested token streaming
data.stream = True
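
Context for the llm_api_tools.py change above: when the caller passes no explicit function_call and tools are present, the request now defaults to OpenAI's tool_choice="required", so the model must call a tool rather than reply with plain text (the old "auto" behavior). A minimal sketch of the resulting request shape with the OpenAI Python SDK; the tool definition is illustrative, not taken from the repo:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

tools = [{
    "type": "function",
    "function": {
        "name": "send_message",
        "description": "Send a message to the user.",
        "parameters": {
            "type": "object",
            "properties": {"message": {"type": "string"}},
            "required": ["message"],
        },
    },
}]

# tool_choice="required" forces the model to call some tool;
# "auto" would let it answer with plain text instead.
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hi."}],
    tools=tools,
    tool_choice="required",
)
print(response.choices[0].message.tool_calls)
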
letta/schemas/letta_response.py (2 changes: 1 addition & 1 deletion)
@@ -66,7 +66,7 @@ def get_formatted_content(msg):
return f'<div class="content"><span class="function-name">{html.escape(msg.function_call.name)}</span>({args})</div>'
elif msg.message_type == "tool_call_message":
args = format_json(msg.tool_call.arguments)
-return f'<div class="content"><span class="function-name">{html.escape(msg.function_call.name)}</span>({args})</div>'
+return f'<div class="content"><span class="function-name">{html.escape(msg.tool_call.name)}</span>({args})</div>'
elif msg.message_type == "function_return":
return_value = format_json(msg.function_return)
# return f'<div class="status-line">Status: {html.escape(msg.status)}</div><div class="content">{return_value}</div>'
letta/schemas/memory.py (3 changes: 3 additions & 0 deletions)
@@ -30,6 +30,9 @@ class ContextWindowOverview(BaseModel):
num_tokens_external_memory_summary: int = Field(
..., description="The number of tokens in the external memory summary (archival + recall metadata)."
)
+external_memory_summary: str = Field(
+    ..., description="The metadata summary of the external memory sources (archival + recall metadata)."
+)

# context window breakdown (in tokens)
# this should all add up to context_window_size_current
letta/services/agent_manager.py (2 changes: 1 addition & 1 deletion)
@@ -388,7 +388,7 @@ def rebuild_system_prompt(self, agent_id: str, actor: PydanticUser, force=False,
curr_memory_str = agent_state.memory.compile()
if curr_memory_str in curr_system_message_openai["content"] and not force:
# NOTE: could this cause issues if a block is removed? (substring match would still work)
-logger.info(
+logger.debug(
f"Memory hasn't changed for agent id={agent_id} and actor=({actor.id}, {actor.name}), skipping system prompt rebuild"
)
return agent_state
pyproject.toml (2 changes: 1 addition & 1 deletion)
@@ -1,6 +1,6 @@
[tool.poetry]
name = "letta"
-version = "0.6.6"
+version = "0.6.7"
packages = [
{include = "letta"}
]
tests/helpers/endpoints_helper.py (6 changes: 5 additions & 1 deletion)
@@ -117,7 +117,11 @@ def check_first_response_is_valid_for_llm_endpoint(filename: str) -> ChatComplet
choice = response.choices[0]

# Ensure that the first message returns a "send_message"
-validator_func = lambda function_call: function_call.name == "send_message" or function_call.name == "archival_memory_search"
+validator_func = (
+    lambda function_call: function_call.name == "send_message"
+    or function_call.name == "archival_memory_search"
+    or function_call.name == "core_memory_append"
+)
assert_contains_valid_function_call(choice.message, validator_func)

# Assert that the message has an inner monologue
tests/integration_test_agent_tool_graph.py (6 changes: 3 additions & 3 deletions)
@@ -38,7 +38,7 @@ def second_secret_word(prev_secret_word: str):
prev_secret_word (str): The secret word retrieved from calling first_secret_word.
"""
if prev_secret_word != "v0iq020i0g":
-raise RuntimeError(f"Expected secret {"v0iq020i0g"}, got {prev_secret_word}")
+raise RuntimeError(f"Expected secret {'v0iq020i0g'}, got {prev_secret_word}")

return "4rwp2b4gxq"

@@ -51,7 +51,7 @@ def third_secret_word(prev_secret_word: str):
prev_secret_word (str): The secret word retrieved from calling second_secret_word.
"""
if prev_secret_word != "4rwp2b4gxq":
-raise RuntimeError(f"Expected secret {"4rwp2b4gxq"}, got {prev_secret_word}")
+raise RuntimeError(f'Expected secret "4rwp2b4gxq", got {prev_secret_word}')

return "hj2hwibbqm"

@@ -64,7 +64,7 @@ def fourth_secret_word(prev_secret_word: str):
prev_secret_word (str): The secret word retrieved from calling third_secret_word.
"""
if prev_secret_word != "hj2hwibbqm":
-raise RuntimeError(f"Expected secret {"hj2hwibbqm"}, got {prev_secret_word}")
+raise RuntimeError(f"Expected secret {'hj2hwibbqm'}, got {prev_secret_word}")

return "banana"

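
The three test fixes above are the same syntax repair: before Python 3.12 (PEP 701), an f-string expression may not reuse the quote character that delimits the string, so f"Expected secret {"v0iq020i0g"}, ..." is a SyntaxError on older interpreters. Switching either the inner or the outer quotes fixes it:

prev_secret_word = "wrong"

# SyntaxError before Python 3.12 (same quote character inside the expression):
#   f"Expected secret {"v0iq020i0g"}, got {prev_secret_word}"

# Both of these parse on all currently supported Python versions:
print(f"Expected secret {'v0iq020i0g'}, got {prev_secret_word}")
print(f'Expected secret "4rwp2b4gxq", got {prev_secret_word}')
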
tests/test_server.py (1 change: 1 addition & 0 deletions)
@@ -508,6 +508,7 @@ def test_get_context_window_overview(server: SyncServer, user, agent_id):
assert overview.num_archival_memory is not None
assert overview.num_recall_memory is not None
assert overview.num_tokens_external_memory_summary is not None
+assert overview.external_memory_summary is not None
assert overview.num_tokens_system is not None
assert overview.system_prompt is not None
assert overview.num_tokens_core_memory is not None
