diff --git a/letta/providers.py b/letta/providers.py index 3ff7214099..0d48659843 100644 --- a/letta/providers.py +++ b/letta/providers.py @@ -414,46 +414,49 @@ def list_llm_models(self) -> List[LLMConfig]: return configs def list_embedding_models(self) -> List[EmbeddingConfig]: - from letta.llm_api.openai import openai_get_model_list - - response = openai_get_model_list(self.base_url, api_key=self.api_key) - - # TogetherAI's response is missing the 'data' field - # assert "data" in response, f"OpenAI model query response missing 'data' field: {response}" - if "data" in response: - data = response["data"] - else: - data = response - - configs = [] - for model in data: - assert "id" in model, f"TogetherAI model missing 'id' field: {model}" - model_name = model["id"] - - if "context_length" in model: - # Context length is returned in OpenRouter as "context_length" - context_window_size = model["context_length"] - else: - context_window_size = self.get_model_context_window_size(model_name) - - if not context_window_size: - continue - - # TogetherAI includes the type, which we can use to filter out embedding models - if "type" in model and model["type"] not in ["embedding"]: - continue - - configs.append( - EmbeddingConfig( - embedding_model=model_name, - embedding_endpoint_type="openai", - embedding_endpoint=self.base_url, - embedding_dim=context_window_size, - embedding_chunk_size=300, # TODO: change? 
- ) - ) + # TODO re-enable once we figure out how to pass API keys through properly + return [] - return configs + # from letta.llm_api.openai import openai_get_model_list + + # response = openai_get_model_list(self.base_url, api_key=self.api_key) + + # # TogetherAI's response is missing the 'data' field + # # assert "data" in response, f"OpenAI model query response missing 'data' field: {response}" + # if "data" in response: + # data = response["data"] + # else: + # data = response + + # configs = [] + # for model in data: + # assert "id" in model, f"TogetherAI model missing 'id' field: {model}" + # model_name = model["id"] + + # if "context_length" in model: + # # Context length is returned in OpenRouter as "context_length" + # context_window_size = model["context_length"] + # else: + # context_window_size = self.get_model_context_window_size(model_name) + + # if not context_window_size: + # continue + + # # TogetherAI includes the type, which we can use to filter out embedding models + # if "type" in model and model["type"] not in ["embedding"]: + # continue + + # configs.append( + # EmbeddingConfig( + # embedding_model=model_name, + # embedding_endpoint_type="openai", + # embedding_endpoint=self.base_url, + # embedding_dim=context_window_size, + # embedding_chunk_size=300, # TODO: change? + # ) + # ) + + # return configs class GoogleAIProvider(Provider): diff --git a/tests/test_providers.py b/tests/test_providers.py index df5a26a0ce..228e33525b 100644 --- a/tests/test_providers.py +++ b/tests/test_providers.py @@ -72,7 +72,7 @@ def test_mistral(): def test_together(): - provider = TogetherProvider(api_key=os.getenv("TOGETHER_API_KEY")) + provider = TogetherProvider(api_key=os.getenv("TOGETHER_API_KEY"), default_prompt_formatter="chatml") models = provider.list_llm_models() print([m.model for m in models])