Skip to content

Commit

Permalink
apply fix
Browse files · Browse the repository at this point in the history
  • Loading branch information
mattzh72 authored and sarahwooders committed Dec 17, 2024
1 parent 8623811 commit 420a6cc
Show file tree
Hide file tree
Showing 5 changed files with 129 additions and 17 deletions.
33 changes: 27 additions & 6 deletions letta/llm_api/anthropic.py
Original file line number Diff line number Diff line change
Expand Up @@ -244,9 +244,9 @@ def convert_anthropic_response_to_chatcompletion(
if isinstance(response_json["content"], list):
if len(response_json["content"]) > 1:
# inner mono + function call
assert len(response_json["content"]) == 2, response_json
assert response_json["content"][0]["type"] == "text", response_json
assert response_json["content"][1]["type"] == "tool_use", response_json
assert len(response_json["content"]) == 2, f"Unexpected content length: {response_json}"
assert response_json["content"][0]["type"] == "text", f"Expected text, got: {response_json['content'][0]}"
assert response_json["content"][1]["type"] == "tool_use", f"Expected tool_use, got: {response_json['content'][1]}"
content = strip_xml_tags(string=response_json["content"][0]["text"], tag=inner_thoughts_xml_tag)
tool_calls = [
ToolCall(
Expand All @@ -258,10 +258,31 @@ def convert_anthropic_response_to_chatcompletion(
),
)
]
elif len(response_json["content"]) == 1:
# Only tool call or just inner mono
first_item = response_json["content"][0]
if first_item["type"] == "text":
# Just inner mono
content = strip_xml_tags(string=first_item["text"], tag=inner_thoughts_xml_tag)
tool_calls = None
elif first_item["type"] == "tool_use":
# Only tool call, no inner mono
content = None # No inner mono to extract
tool_calls = [
ToolCall(
id=first_item["id"],
type="function",
function=FunctionCall(
name=first_item["name"],
arguments=json.dumps(first_item["input"], indent=2),
),
)
]
else:
raise ValueError(f"Unexpected type in content: {first_item}")
else:
# Just inner mono
content = strip_xml_tags(string=response_json["content"][0]["text"], tag=inner_thoughts_xml_tag)
tool_calls = None
# Empty content list
raise ValueError(f"Empty content in response: {response_json}")
else:
raise RuntimeError("Unexpected type for content in response_json.")

Expand Down
10 changes: 10 additions & 0 deletions letta/offline_memory_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,18 @@ def trigger_rethink_memory(agent_state: "AgentState", message: Optional[str]) ->
"""
from letta import create_client
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.llm_config import LLMConfig

client = create_client()
ANTHROPIC_CONFIG = LLMConfig(
model_endpoint_type="anthropic",
model_endpoint="https://api.anthropic.com/v1",
model="claude-3-5-haiku-20241022",
context_window=32000,
)
client.set_default_llm_config(ANTHROPIC_CONFIG)
client.set_default_embedding_config(EmbeddingConfig.default_config(model_name="letta"))
agents = client.list_agents()
for agent in agents:
if agent.agent_type == "offline_memory_agent":
Expand Down
5 changes: 2 additions & 3 deletions letta/server/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -455,9 +455,8 @@ def _step(
# save agent after step
save_agent(letta_agent)

except Exception as e:
logger.error(f"Error in server._step: {e}")
print(traceback.print_exc())
except Exception:
logger.exception("Error in server._step")
raise
finally:
logger.debug("Calling step_yield()")
Expand Down
10 changes: 4 additions & 6 deletions letta/services/tool_execution_sandbox.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,6 @@ def run_local_dir_sandbox_venv(self, sbx_config: SandboxConfig, env: Dict[str, s
except Exception as e:
logger.error(f"Executing tool {self.tool_name} has an unexpected error: {e}")
raise e


def run_local_dir_sandbox_runpy(
self, sbx_config: SandboxConfig, env_vars: Dict[str, str], temp_file_path: str, old_stdout: TextIO, old_stderr: TextIO
Expand All @@ -209,7 +208,6 @@ def run_local_dir_sandbox_runpy(
# Execute the temp file
with self.temporary_env_vars(env_vars):
result = runpy.run_path(temp_file_path, init_globals=env_vars)

# Fetch the result
func_result = result.get(self.LOCAL_SANDBOX_RESULT_VAR_NAME)
func_return, agent_state = self.parse_best_effort(func_result)
Expand All @@ -223,7 +221,7 @@ def run_local_dir_sandbox_runpy(
sys.stderr = old_stderr
stdout_output = [captured_stdout.getvalue()]
stderr_output = [captured_stderr.getvalue()]
stderr_output.append(error_msg if error_msg else '')
stderr_output.append(error_msg if error_msg else "")

return SandboxRunResult(
func_return=func_return,
Expand All @@ -235,7 +233,7 @@ def run_local_dir_sandbox_runpy(

def parse_out_function_results_markers(self, text: str):
if self.LOCAL_SANDBOX_RESULT_START_MARKER not in text:
return '', text
return "", text
marker_len = len(self.LOCAL_SANDBOX_RESULT_START_MARKER)
start_index = text.index(self.LOCAL_SANDBOX_RESULT_START_MARKER) + marker_len
end_index = text.index(self.LOCAL_SANDBOX_RESULT_END_MARKER)
Expand Down Expand Up @@ -283,8 +281,8 @@ def run_e2b_sandbox(self, agent_state: AgentState) -> SandboxRunResult:
func_return, agent_state = None, None
if execution.error is not None:
logger.error(f"Executing tool {self.tool_name} failed with {execution.error}")
execution.logs.stderr.append(execution.error.traceback)
execution.logs.stderr.append(f"{execution.error.name}: {execution.error.value}")
execution.logs.stderr.append(execution.error.traceback)
execution.logs.stderr.append(f"{execution.error.name}: {execution.error.value}")
elif len(execution.results) == 0:
raise ValueError(f"Tool {self.tool_name} returned execution with None")
else:
Expand Down
88 changes: 86 additions & 2 deletions tests/integration_test_offline_memory_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,16 @@
@pytest.fixture(scope="module")
def client():
client = create_client()
client.set_default_llm_config(LLMConfig.default_config("gpt-4o-mini"))
client.set_default_embedding_config(EmbeddingConfig.default_config(provider="openai"))
ANTHROPIC_CONFIG = LLMConfig(
model_endpoint_type="anthropic",
model_endpoint="https://api.anthropic.com/v1",
model="claude-3-5-haiku-20241022",
context_window=32000,
)
# client.set_default_llm_config(LLMConfig.default_config("gpt-4o-mini"))
# client.set_default_embedding_config(EmbeddingConfig.default_config(provider="openai"))
client.set_default_llm_config(ANTHROPIC_CONFIG)
client.set_default_embedding_config(EmbeddingConfig.default_config(model_name="letta"))

yield client

Expand All @@ -33,6 +41,82 @@ def clear_agents(client):
client.delete_agent(agent.id)


def test_ripple_edit_anthropic(client, mock_e2b_api_key_none):
    """Exercise the ripple-effect knowledge-edit flow with the Anthropic-configured client.

    A conversation agent and an offline memory agent share two blocks: a
    ``fact_block`` of Messi facts and a ``rethink_memory_block`` initialized to
    "[empty]". Sending a knowledge-edit message through the conversation agent
    should cause the offline agent to rewrite the shared
    ``rethink_memory_block`` (i.e. it must no longer be "[empty]").

    Scenario: Figure 1 of "Evaluating the Ripple Effects of Knowledge Editing
    in Language Models" (Cohen et al., 2023), https://arxiv.org/pdf/2307.12976
    """
    trigger_rethink_memory_tool = client.create_or_update_tool(trigger_rethink_memory)

    # Per-agent persona/human blocks; the fact and rethink blocks below are shared.
    conversation_human_block = Block(name="human", label="human", value=get_human_text(DEFAULT_HUMAN), limit=2000)
    conversation_persona_block = Block(name="persona", label="persona", value=get_persona_text(DEFAULT_PERSONA), limit=2000)
    offline_human_block = Block(name="human", label="human", value=get_human_text(DEFAULT_HUMAN), limit=2000)
    offline_persona_block = Block(name="persona", label="persona", value=get_persona_text("offline_memory_persona"), limit=2000)

    fact_block = Block(
        name="fact_block",
        label="fact_block",
        value="""Messi resides in the Paris.
Messi plays in the league Ligue 1.
Messi plays for the team Paris Saint-Germain.
The national team Messi plays for is the Argentina team.
Messi is also known as Leo Messi
Victor Ulloa plays for Inter Miami""",
        limit=2000,
    )

    # Shared block the offline agent is expected to overwrite.
    new_memory = Block(name="rethink_memory_block", label="rethink_memory_block", value="[empty]", limit=2000)
    conversation_memory = BasicBlockMemory(blocks=[conversation_persona_block, conversation_human_block, fact_block, new_memory])
    offline_memory = BasicBlockMemory(blocks=[offline_persona_block, offline_human_block, fact_block, new_memory])

    # LLM/embedding configs come from the client fixture defaults, so no
    # per-agent llm_config/embedding_config overrides are needed here.
    conversation_agent = client.create_agent(
        name="conversation_agent",
        agent_type=AgentType.memgpt_agent,
        system=gpt_system.get_system_text("memgpt_convo_only"),
        tools=["send_message", trigger_rethink_memory_tool.name],
        memory=conversation_memory,
        include_base_tools=False,
    )
    assert conversation_agent is not None
    assert set(conversation_agent.memory.list_block_labels()) == {"persona", "human", "fact_block", "rethink_memory_block"}

    rethink_memory_tool = client.create_tool(rethink_memory)
    finish_rethinking_memory_tool = client.create_tool(finish_rethinking_memory)
    offline_memory_agent = client.create_agent(
        name="offline_memory_agent",
        agent_type=AgentType.offline_memory_agent,
        system=gpt_system.get_system_text("memgpt_offline_memory"),
        memory=offline_memory,
        tools=[rethink_memory_tool.name, finish_rethinking_memory_tool.name],
        # The offline agent must terminate after finishing its rethink pass.
        tool_rules=[TerminalToolRule(tool_name=finish_rethinking_memory_tool.name)],
        include_base_tools=False,
    )
    assert offline_memory_agent is not None
    assert set(offline_memory_agent.memory.list_block_labels()) == {"persona", "human", "fact_block", "rethink_memory_block"}

    # Deliver the knowledge edit; the response object itself is not inspected.
    client.user_message(
        agent_id=conversation_agent.id, message="[trigger_rethink_memory]: Messi has now moved to playing for Inter Miami"
    )

    # Re-fetch both agents: the shared rethink block must have been rewritten.
    offline_memory_agent = client.get_agent(agent_id=offline_memory_agent.id)
    assert offline_memory_agent.memory.get_block("rethink_memory_block").value != "[empty]"
    conversation_agent = client.get_agent(agent_id=conversation_agent.id)
    assert conversation_agent.memory.get_block("rethink_memory_block").value != "[empty]"

    # NOTE(review): explicit agent deletion was previously dead code (a no-op
    # string literal) and has been removed; cleanup is presumably handled by
    # the clear_agents fixture — confirm it applies to this module.


def test_ripple_edit(client, mock_e2b_api_key_none):
trigger_rethink_memory_tool = client.create_or_update_tool(trigger_rethink_memory)
send_message = client.server.tool_manager.get_tool_by_name(tool_name="send_message", actor=client.user)
Expand Down

0 comments on commit 420a6cc

Please sign in to comment.