def retry_until_success(max_attempts=10, sleep_time_seconds=4):
    """
    Decorator to retry a function until it succeeds or the maximum number of attempts is reached.

    Each failed attempt is reported to stdout; when the final attempt fails,
    its exception is re-raised so the caller still sees the real error.

    :param max_attempts: Maximum number of attempts to retry the function (must be >= 1).
    :param sleep_time_seconds: Time to wait between attempts, in seconds.
    :raises ValueError: If ``max_attempts`` is less than 1.
    """
    # Guard: with max_attempts < 1 the retry loop below would never execute
    # and the wrapper would silently return None instead of calling func.
    if max_attempts < 1:
        raise ValueError("max_attempts must be at least 1")

    def decorator_retry(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(1, max_attempts + 1):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    # Yellow ANSI text so flaky-attempt noise stands out in logs.
                    print(f"\033[93mAttempt {attempt} failed with error:\n{e}\033[0m")
                    if attempt == max_attempts:
                        raise  # out of attempts: surface the last failure
                    time.sleep(sleep_time_seconds)

        return wrapper

    return decorator_retry
@retry_until_success(max_attempts=5, sleep_time_seconds=2)
def test_openai_gpt_4o_archival_memory_retrieval():
    # Runs the shared archival-memory-retrieval check against the gpt-4o config.
    # NOTE(review): presumably hits a live LLM endpoint — hence the retry decorator.
    filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_agent_archival_memory_retrieval(filename)
    print(f"Got successful response from client: \n\n{response}")


@retry_until_success(max_attempts=5, sleep_time_seconds=2)
def test_openai_gpt_4o_archival_memory_insert():
    # Runs the shared archival-memory-insert check against the gpt-4o config.
    filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_agent_archival_memory_insert(filename)
    print(f"Got successful response from client: \n\n{response}")


@retry_until_success(max_attempts=5, sleep_time_seconds=2)
def test_openai_gpt_4o_edit_core_memory():
    # Runs the shared core-memory-edit check against the gpt-4o config.
    filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_agent_edit_core_memory(filename)
    print(f"Got successful response from client: \n\n{response}")


@retry_until_success(max_attempts=5, sleep_time_seconds=2)
def test_openai_gpt_4o_summarize_memory():
    # Runs the shared summarize-memory check against the gpt-4o config.
    filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_agent_summarize_memory_simple(filename)
    print(f"Got successful response from client: \n\n{response}")


@retry_until_success(max_attempts=5, sleep_time_seconds=2)
def test_embedding_endpoint_openai():
    # Exercises the OpenAI embedding endpoint using the openai_embed config.
    filename = os.path.join(embedding_config_dir, "openai_embed.json")
    run_embedding_endpoint(filename)