From 042293c543348a381543da2b8f1d40336d6328ae Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Tue, 22 Oct 2024 00:26:00 +0530 Subject: [PATCH 01/16] add: vertexai autopatch integration --- weave/integrations/vertexai/__init__.py | 0 weave/integrations/vertexai/vertexai_sdk.py | 99 +++++++++++++++++++++ weave/trace/autopatch.py | 4 + 3 files changed, 103 insertions(+) create mode 100644 weave/integrations/vertexai/__init__.py create mode 100644 weave/integrations/vertexai/vertexai_sdk.py diff --git a/weave/integrations/vertexai/__init__.py b/weave/integrations/vertexai/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/weave/integrations/vertexai/vertexai_sdk.py b/weave/integrations/vertexai/vertexai_sdk.py new file mode 100644 index 00000000000..b0ad47ee188 --- /dev/null +++ b/weave/integrations/vertexai/vertexai_sdk.py @@ -0,0 +1,99 @@ +import importlib +from functools import wraps +from typing import Any, Callable, Optional, TYPE_CHECKING +import weave +from weave.trace.patcher import MultiPatcher, SymbolPatcher +from weave.trace.op_extensions.accumulator import add_accumulator + + +if TYPE_CHECKING: + from vertexai.generative_models import GenerationResponse + + +def vertexai_accumulator( + acc: Optional["GenerationResponse"], value: "GenerationResponse" +) -> "GenerationResponse": + from vertexai.generative_models import GenerationResponse + from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types + from google.cloud.aiplatform_v1beta1.types import prediction_service as gapic_prediction_service_types + + if acc is None: + return value + + candidates = [] + for i, value_candidate in enumerate(value.candidates): + accumulated_texts = [] + for j, value_part in enumerate(value_candidate.content.parts): + accumulated_text = acc.candidates[i].content.parts[j].text + value_part.text + accumulated_texts.append(accumulated_text) + parts = [gapic_content_types.Part(text=text) for text in 
accumulated_texts] + content = gapic_content_types.Content( + role=value_candidate.content.role, parts=parts + ) + candidate = gapic_content_types.Candidate(content=content) + candidates.append(candidate) + accumulated_response = gapic_prediction_service_types.GenerateContentResponse(candidates=candidates) + acc = GenerationResponse._from_gapic(accumulated_response) + + acc.usage_metadata.prompt_token_count += value.usage_metadata.prompt_token_count + acc.usage_metadata.candidates_token_count += ( + value.usage_metadata.candidates_token_count + ) + acc.usage_metadata.total_token_count += value.usage_metadata.total_token_count + return acc + + +def vertexai_wrapper_sync(name: str) -> Callable[[Callable], Callable]: + def wrapper(fn: Callable) -> Callable: + op = weave.op()(fn) + op.name = name # type: ignore + return add_accumulator( + op, # type: ignore + make_accumulator=lambda inputs: vertexai_accumulator, + should_accumulate=lambda inputs: isinstance(inputs, dict) + and bool(inputs.get("stream")), + ) + + return wrapper + + +def vertexai_wrapper_async(name: str) -> Callable[[Callable], Callable]: + def wrapper(fn: Callable) -> Callable: + def _fn_wrapper(fn: Callable) -> Callable: + @wraps(fn) + async def _async_wrapper(*args: Any, **kwargs: Any) -> Any: + return await fn(*args, **kwargs) + + return _async_wrapper + + "We need to do this so we can check if `stream` is used" + op = weave.op()(_fn_wrapper(fn)) + op.name = name # type: ignore + return add_accumulator( + op, # type: ignore + make_accumulator=lambda inputs: vertexai_accumulator, + should_accumulate=lambda inputs: isinstance(inputs, dict) + and bool(inputs.get("stream")), + ) + + return wrapper + + +vertexai_patcher = MultiPatcher( + [ + SymbolPatcher( + lambda: importlib.import_module("vertexai.generative_models"), + "GenerativeModel.generate_content", + vertexai_wrapper_sync( + name="vertexai.GenerativeModel.generate_content" + ), + ), + SymbolPatcher( + lambda: 
importlib.import_module("vertexai.generative_models"), + "GenerativeModel.generate_content_async", + vertexai_wrapper_async( + name="vertexai.GenerativeModel.generate_content_async" + ), + ), + ] +) diff --git a/weave/trace/autopatch.py b/weave/trace/autopatch.py index de37d951032..3a5dca14556 100644 --- a/weave/trace/autopatch.py +++ b/weave/trace/autopatch.py @@ -21,6 +21,7 @@ def autopatch() -> None: from weave.integrations.mistral import mistral_patcher from weave.integrations.notdiamond.tracing import notdiamond_patcher from weave.integrations.openai.openai_sdk import openai_patcher + from weave.integrations.vertexai.vertexai_sdk import vertexai_patcher openai_patcher.attempt_patch() mistral_patcher.attempt_patch() @@ -35,6 +36,7 @@ def autopatch() -> None: cohere_patcher.attempt_patch() google_genai_patcher.attempt_patch() notdiamond_patcher.attempt_patch() + vertexai_patcher.attempt_patch() def reset_autopatch() -> None: @@ -53,6 +55,7 @@ def reset_autopatch() -> None: from weave.integrations.mistral import mistral_patcher from weave.integrations.notdiamond.tracing import notdiamond_patcher from weave.integrations.openai.openai_sdk import openai_patcher + from weave.integrations.vertexai.vertexai_sdk import vertexai_patcher openai_patcher.undo_patch() mistral_patcher.undo_patch() @@ -67,3 +70,4 @@ def reset_autopatch() -> None: cohere_patcher.undo_patch() google_genai_patcher.undo_patch() notdiamond_patcher.undo_patch() + vertexai_patcher.undo_patch() From 42a257b0895eaed7a270ede6fe1634f6858ee05c Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Tue, 22 Oct 2024 00:29:24 +0530 Subject: [PATCH 02/16] fix: lint --- weave/integrations/vertexai/vertexai_sdk.py | 22 +++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/weave/integrations/vertexai/vertexai_sdk.py b/weave/integrations/vertexai/vertexai_sdk.py index b0ad47ee188..98d4b87821d 100644 --- a/weave/integrations/vertexai/vertexai_sdk.py +++ 
b/weave/integrations/vertexai/vertexai_sdk.py @@ -1,10 +1,10 @@ import importlib from functools import wraps -from typing import Any, Callable, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Callable, Optional + import weave -from weave.trace.patcher import MultiPatcher, SymbolPatcher from weave.trace.op_extensions.accumulator import add_accumulator - +from weave.trace.patcher import MultiPatcher, SymbolPatcher if TYPE_CHECKING: from vertexai.generative_models import GenerationResponse @@ -13,10 +13,12 @@ def vertexai_accumulator( acc: Optional["GenerationResponse"], value: "GenerationResponse" ) -> "GenerationResponse": - from vertexai.generative_models import GenerationResponse from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types - from google.cloud.aiplatform_v1beta1.types import prediction_service as gapic_prediction_service_types - + from google.cloud.aiplatform_v1beta1.types import ( + prediction_service as gapic_prediction_service_types, + ) + from vertexai.generative_models import GenerationResponse + if acc is None: return value @@ -32,7 +34,9 @@ def vertexai_accumulator( ) candidate = gapic_content_types.Candidate(content=content) candidates.append(candidate) - accumulated_response = gapic_prediction_service_types.GenerateContentResponse(candidates=candidates) + accumulated_response = gapic_prediction_service_types.GenerateContentResponse( + candidates=candidates + ) acc = GenerationResponse._from_gapic(accumulated_response) acc.usage_metadata.prompt_token_count += value.usage_metadata.prompt_token_count @@ -84,9 +88,7 @@ async def _async_wrapper(*args: Any, **kwargs: Any) -> Any: SymbolPatcher( lambda: importlib.import_module("vertexai.generative_models"), "GenerativeModel.generate_content", - vertexai_wrapper_sync( - name="vertexai.GenerativeModel.generate_content" - ), + vertexai_wrapper_sync(name="vertexai.GenerativeModel.generate_content"), ), SymbolPatcher( lambda: 
importlib.import_module("vertexai.generative_models"), From 57e414c3eb9eca9fa1239a9dacfa5b812007f30d Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Tue, 22 Oct 2024 00:40:17 +0530 Subject: [PATCH 03/16] add: vertexai_on_finish for handing token count, pricing, and execution time --- weave/integrations/vertexai/vertexai_sdk.py | 22 +++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/weave/integrations/vertexai/vertexai_sdk.py b/weave/integrations/vertexai/vertexai_sdk.py index 98d4b87821d..bd92fd18531 100644 --- a/weave/integrations/vertexai/vertexai_sdk.py +++ b/weave/integrations/vertexai/vertexai_sdk.py @@ -5,6 +5,7 @@ import weave from weave.trace.op_extensions.accumulator import add_accumulator from weave.trace.patcher import MultiPatcher, SymbolPatcher +from weave.trace.weave_client import Call if TYPE_CHECKING: from vertexai.generative_models import GenerationResponse @@ -47,10 +48,30 @@ def vertexai_accumulator( return acc +def vertexai_on_finish( + call: Call, output: Any, exception: Optional[BaseException] +) -> None: + original_model_name = call.inputs["self"]._model_name + model_name = original_model_name.split("/")[-1] + usage = {model_name: {"requests": 1}} + summary_update = {"usage": usage} + if output: + usage[model_name].update( + { + "prompt_tokens": output.usage_metadata.prompt_token_count, + "completion_tokens": output.usage_metadata.candidates_token_count, + "total_tokens": output.usage_metadata.total_token_count, + } + ) + if call.summary is not None: + call.summary.update(summary_update) + + def vertexai_wrapper_sync(name: str) -> Callable[[Callable], Callable]: def wrapper(fn: Callable) -> Callable: op = weave.op()(fn) op.name = name # type: ignore + op._set_on_finish_handler(vertexai_on_finish) return add_accumulator( op, # type: ignore make_accumulator=lambda inputs: vertexai_accumulator, @@ -73,6 +94,7 @@ async def _async_wrapper(*args: Any, **kwargs: Any) -> Any: "We need to do this so 
we can check if `stream` is used" op = weave.op()(_fn_wrapper(fn)) op.name = name # type: ignore + op._set_on_finish_handler(vertexai_on_finish) return add_accumulator( op, # type: ignore make_accumulator=lambda inputs: vertexai_accumulator, From fc651d35f9adfc95dcb4ad584e45d1a6c94e7098 Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Tue, 22 Oct 2024 01:06:30 +0530 Subject: [PATCH 04/16] add: tests for vertexai integration --- .github/workflows/test.yaml | 1 + noxfile.py | 1 + pyproject.toml | 1 + tests/integrations/vertexai/vertexai_test.py | 105 +++++++++++++++++++ 4 files changed, 108 insertions(+) create mode 100644 tests/integrations/vertexai/vertexai_test.py diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 09f51268744..b363a657db7 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -240,6 +240,7 @@ jobs: 'mistral1', 'notdiamond', 'openai', + 'vertexai', ] fail-fast: false services: diff --git a/noxfile.py b/noxfile.py index 22a84e0adb1..9d46d76b9d5 100644 --- a/noxfile.py +++ b/noxfile.py @@ -40,6 +40,7 @@ def lint(session): "mistral1", "notdiamond", "openai", + "vertexai", ], ) def tests(session, shard): diff --git a/pyproject.toml b/pyproject.toml index 3bf7dbf601d..921e876c4df 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,6 +69,7 @@ mistral1 = ["mistralai>=1.0.0"] notdiamond = ["notdiamond>=0.3.21", "litellm<=1.49.1"] openai = ["openai>=1.0.0"] modal = ["modal", "python-dotenv"] +vertexai = ["vertexai>=1.70.0"] test = [ "nox", "pytest>=8.2.0", diff --git a/tests/integrations/vertexai/vertexai_test.py b/tests/integrations/vertexai/vertexai_test.py new file mode 100644 index 00000000000..5663ab588c9 --- /dev/null +++ b/tests/integrations/vertexai/vertexai_test.py @@ -0,0 +1,105 @@ +import os + +import pytest + +from weave.integrations.integration_utilities import op_name_from_ref + + +@pytest.mark.retry(max_attempts=5) +@pytest.mark.skip_clickhouse_client +def 
test_content_generation(client): + import vertexai + from vertexai.generative_models import GenerativeModel + + vertexai.init(project="wandb-growth", location="us-central1") + model = GenerativeModel("gemini-1.5-flash") + model.generate_content("Explain how AI works in simple terms") + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + + trace_name = op_name_from_ref(call.op_name) + assert trace_name == "vertexai.GenerativeModel.generate_content" + assert call.output is not None + + +@pytest.mark.retry(max_attempts=5) +@pytest.mark.skip_clickhouse_client +def test_content_generation_stream(client): + import vertexai + from vertexai.generative_models import GenerativeModel + + vertexai.init(project="wandb-growth", location="us-central1") + model = GenerativeModel("gemini-1.5-flash") + response = model.generate_content( + "Explain how AI works in simple terms", stream=True + ) + chunks = [chunk.text for chunk in response] + assert len(chunks) > 1 + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + + trace_name = op_name_from_ref(call.op_name) + assert trace_name == "vertexai.GenerativeModel.generate_content" + assert call.output is not None + + +@pytest.mark.retry(max_attempts=5) +@pytest.mark.asyncio +@pytest.mark.skip_clickhouse_client +async def test_content_generation_async(client): + import vertexai + from vertexai.generative_models import GenerativeModel + + vertexai.init(project="wandb-growth", location="us-central1") + model = GenerativeModel("gemini-1.5-flash") + _ = await model.generate_content_async("Explain how AI works in simple terms") + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + + trace_name = op_name_from_ref(call.op_name) + assert trace_name == "vertexai.GenerativeModel.generate_content_async" + assert call.output is not None + + 
+@pytest.mark.retry(max_attempts=5) +@pytest.mark.asyncio +@pytest.mark.skip_clickhouse_client +async def test_content_generation_async_stream(client): + import vertexai + from vertexai.generative_models import GenerativeModel + + vertexai.init(project="wandb-growth", location="us-central1") + model = GenerativeModel("gemini-1.5-flash") + + async def get_response(): + chunks = [] + async for chunk in await model.generate_content_async( + "Explain how AI works in simple terms", stream=True + ): + if chunk.text: + chunks.append(chunk.text) + return chunks + + _ = await get_response() + + calls = list(client.calls()) + assert len(calls) == 1 + + call = calls[0] + assert call.started_at < call.ended_at + + trace_name = op_name_from_ref(call.op_name) + assert trace_name == "vertexai.GenerativeModel.generate_content_async" + assert call.output is not None From 46e438fa2228a07f0b9c3512572eeecb9fef88f6 Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Tue, 22 Oct 2024 01:58:28 +0530 Subject: [PATCH 05/16] fix: lint --- tests/integrations/vertexai/vertexai_test.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/integrations/vertexai/vertexai_test.py b/tests/integrations/vertexai/vertexai_test.py index 5663ab588c9..657009aa906 100644 --- a/tests/integrations/vertexai/vertexai_test.py +++ b/tests/integrations/vertexai/vertexai_test.py @@ -1,5 +1,3 @@ -import os - import pytest from weave.integrations.integration_utilities import op_name_from_ref From 6bb2d760f4061e05b8c690c94223ad9595f8d1c3 Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Mon, 28 Oct 2024 08:17:02 +0530 Subject: [PATCH 06/16] add: patch for ImageGenerationModel.generate_images --- weave/integrations/vertexai/vertexai_sdk.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/weave/integrations/vertexai/vertexai_sdk.py b/weave/integrations/vertexai/vertexai_sdk.py index bd92fd18531..348c9760ac4 100644 --- 
a/weave/integrations/vertexai/vertexai_sdk.py +++ b/weave/integrations/vertexai/vertexai_sdk.py @@ -119,5 +119,10 @@ async def _async_wrapper(*args: Any, **kwargs: Any) -> Any: name="vertexai.GenerativeModel.generate_content_async" ), ), + SymbolPatcher( + lambda: importlib.import_module("vertexai.preview.vision_models"), + "ImageGenerationModel.generate_images", + vertexai_wrapper_sync(name="vertexai.ImageGenerationModel.generate_images"), + ), ] ) From 231ade12d59853c6f12e9724e77e78b9b628f5c3 Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Tue, 29 Oct 2024 02:59:11 +0530 Subject: [PATCH 07/16] update: tests --- tests/integrations/vertexai/vertexai_test.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/integrations/vertexai/vertexai_test.py b/tests/integrations/vertexai/vertexai_test.py index 657009aa906..258a2be07b7 100644 --- a/tests/integrations/vertexai/vertexai_test.py +++ b/tests/integrations/vertexai/vertexai_test.py @@ -11,7 +11,7 @@ def test_content_generation(client): vertexai.init(project="wandb-growth", location="us-central1") model = GenerativeModel("gemini-1.5-flash") - model.generate_content("Explain how AI works in simple terms") + model.generate_content("What is the capital of France?") calls = list(client.calls()) assert len(calls) == 1 @@ -21,7 +21,7 @@ def test_content_generation(client): trace_name = op_name_from_ref(call.op_name) assert trace_name == "vertexai.GenerativeModel.generate_content" - assert call.output is not None + assert "paris" in str(call.output).lower() @pytest.mark.retry(max_attempts=5) @@ -33,7 +33,7 @@ def test_content_generation_stream(client): vertexai.init(project="wandb-growth", location="us-central1") model = GenerativeModel("gemini-1.5-flash") response = model.generate_content( - "Explain how AI works in simple terms", stream=True + "What is the capital of France?", stream=True ) chunks = [chunk.text for chunk in response] assert len(chunks) 
> 1 @@ -46,7 +46,7 @@ def test_content_generation_stream(client): trace_name = op_name_from_ref(call.op_name) assert trace_name == "vertexai.GenerativeModel.generate_content" - assert call.output is not None + assert "paris" in str(call.output).lower() @pytest.mark.retry(max_attempts=5) @@ -58,7 +58,7 @@ async def test_content_generation_async(client): vertexai.init(project="wandb-growth", location="us-central1") model = GenerativeModel("gemini-1.5-flash") - _ = await model.generate_content_async("Explain how AI works in simple terms") + _ = await model.generate_content_async("What is the capital of France?") calls = list(client.calls()) assert len(calls) == 1 @@ -68,7 +68,7 @@ async def test_content_generation_async(client): trace_name = op_name_from_ref(call.op_name) assert trace_name == "vertexai.GenerativeModel.generate_content_async" - assert call.output is not None + assert "paris" in str(call.output).lower() @pytest.mark.retry(max_attempts=5) @@ -84,7 +84,7 @@ async def test_content_generation_async_stream(client): async def get_response(): chunks = [] async for chunk in await model.generate_content_async( - "Explain how AI works in simple terms", stream=True + "What is the capital of France?", stream=True ): if chunk.text: chunks.append(chunk.text) @@ -100,4 +100,4 @@ async def get_response(): trace_name = op_name_from_ref(call.op_name) assert trace_name == "vertexai.GenerativeModel.generate_content_async" - assert call.output is not None + assert "paris" in str(call.output).lower() From a40d4e13be92e4790bff5df0670c5e3322e063b2 Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Tue, 29 Oct 2024 03:03:42 +0530 Subject: [PATCH 08/16] update: cassettes --- .../test_content_generation.yaml | 59 +++++++++++++++++++ .../test_content_generation_stream.yaml | 59 +++++++++++++++++++ tests/integrations/vertexai/vertexai_test.py | 16 +++++ 3 files changed, 134 insertions(+) create mode 100644 
tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation.yaml create mode 100644 tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_stream.yaml diff --git a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation.yaml b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation.yaml new file mode 100644 index 00000000000..b97e1b3ea3d --- /dev/null +++ b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation.yaml @@ -0,0 +1,59 @@ +interactions: +- request: + body: grant_type=refresh_token&client_id=764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com&client_secret=d-FL95Q19q7MQmFpd7hHD0Ty&refresh_token=1%2F%2F0gLiyt963b8sOCgYIARAAGBASNwF-L9Ir7IWWSZUm-v0s2idDS4EYVX45KS8GZ0v9e0Ykjq8V8oMGXrcCohA6WFBUlBNSpkdOH_Y + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '268' + Content-Type: + - application/x-www-form-urlencoded + User-Agent: + - python-requests/2.32.3 + x-goog-api-client: + - gl-python/3.9.12 auth/2.35.0 cred-type/u + method: POST + uri: https://oauth2.googleapis.com/token + response: + body: + string: "{\n \"access_token\": \"ya29.a0AeDClZBelAx2FDCO9JlblsGuPjsHSW5rx4MyF9nZPH7RCoRnxTLfkPNT-cdst5JH9wJ9KQ7mEENoL6oh9v9Jlnu5ood7_BiZc_jr2xiooG_cFn46FpoRt527-fIgAVNK0ZrRR_nwQisqLlM8C5UHfbN8flTVnzPChcbDT1hJlwaCgYKAfUSARESFQHGX2Mi0EYI27EDFa1piqKlnYadLg0177\",\n + \ \"expires_in\": 3599,\n \"scope\": \"https://www.googleapis.com/auth/userinfo.email + https://www.googleapis.com/auth/sqlservice.login openid https://www.googleapis.com/auth/cloud-platform\",\n + \ \"token_type\": \"Bearer\",\n \"id_token\": 
\"eyJhbGciOiJSUzI1NiIsImtpZCI6ImM4OGQ4MDlmNGRiOTQzZGY1M2RhN2FjY2ZkNDc3NjRkMDViYTM5MWYiLCJ0eXAiOiJKV1QifQ.eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhenAiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJhdWQiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJzdWIiOiIxMTI5OTYyMDUzOTczMDQ4ODIzNTciLCJoZCI6IndhbmRiLmNvbSIsImVtYWlsIjoic291bWlrLnJha3NoaXRAd2FuZGIuY29tIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsImF0X2hhc2giOiJ5MlVJWXZkVmhhSXBFQldGdWFhSGRRIiwiaWF0IjoxNzMwMTUxMDc0LCJleHAiOjE3MzAxNTQ2NzR9.RdVJ0QIyEt3YgpCwzQaIy-yhAfsd5wgm5lQvyPowbRRU39hEIydyVzR2UokywCf_k6m8xG7Kn6E66EifgxK-4Po088aHvhf7_yao1PCRDxKsLNpn0bHLJq5ymVvndKJMkvndySrBM2eByY7vTTbQU5emusvFK9XZmp2l4HBRNIpt8a_6vXQiIjBnjxaCGArVeRaZ3AViwN8J0Bwy5QdPZGvKDjVZiGlb7lTPHedQ-Lo4eUXuW83xUDW2WgeCx-e-GKg_oCeGd0rwE18Pz4Sm17y-SUQKMMrdwB8HGD_VpK8TKmoEsOr8n3t2-G31ua0wmG9qMlTzYrY8f3Kn8C-oVQ\"\n}" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Cache-Control: + - no-cache, no-store, max-age=0, must-revalidate + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=utf-8 + Date: + - Mon, 28 Oct 2024 21:31:14 GMT + Expires: + - Mon, 01 Jan 1990 00:00:00 GMT + Pragma: + - no-cache + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_stream.yaml b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_stream.yaml new file mode 100644 index 00000000000..da514f00c69 --- /dev/null +++ b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_stream.yaml @@ -0,0 +1,59 @@ +interactions: +- request: + body: 
grant_type=refresh_token&client_id=764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com&client_secret=d-FL95Q19q7MQmFpd7hHD0Ty&refresh_token=1%2F%2F0gLiyt963b8sOCgYIARAAGBASNwF-L9Ir7IWWSZUm-v0s2idDS4EYVX45KS8GZ0v9e0Ykjq8V8oMGXrcCohA6WFBUlBNSpkdOH_Y + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '268' + Content-Type: + - application/x-www-form-urlencoded + User-Agent: + - python-requests/2.32.3 + x-goog-api-client: + - gl-python/3.9.12 auth/2.35.0 cred-type/u + method: POST + uri: https://oauth2.googleapis.com/token + response: + body: + string: "{\n \"access_token\": \"ya29.a0AeDClZDPI3uAyf9H7JW4UIb9JW_kMHCCJqkDGc6w8roBW619IiOJ1UzyUieWpy1ayyRGyTwaXEIDYABTxwIlSjfWwcsRrfbidnkqMBCAy0XWgMfdnMfRUy4P_nqFCLz-dsfUYXXhCm8i8ZyfvUiaNF506xoNDCt02UsjLqFJhAaCgYKATESARESFQHGX2MiJcXnY1oyo2KG_7hFtnjQoA0177\",\n + \ \"expires_in\": 3599,\n \"scope\": \"https://www.googleapis.com/auth/cloud-platform + https://www.googleapis.com/auth/userinfo.email openid https://www.googleapis.com/auth/sqlservice.login\",\n + \ \"token_type\": \"Bearer\",\n \"id_token\": 
\"eyJhbGciOiJSUzI1NiIsImtpZCI6ImM4OGQ4MDlmNGRiOTQzZGY1M2RhN2FjY2ZkNDc3NjRkMDViYTM5MWYiLCJ0eXAiOiJKV1QifQ.eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhenAiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJhdWQiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJzdWIiOiIxMTI5OTYyMDUzOTczMDQ4ODIzNTciLCJoZCI6IndhbmRiLmNvbSIsImVtYWlsIjoic291bWlrLnJha3NoaXRAd2FuZGIuY29tIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsImF0X2hhc2giOiJFcTFRUXphQUMxNm9EZEhpTGZRX2tBIiwiaWF0IjoxNzMwMTUxMTM2LCJleHAiOjE3MzAxNTQ3MzZ9.GZoOYdP9uVjPQXkCgA5zgFbZ-ZcxmVZD5bhiKQ6T2sHKtACc0dh9hEOui1oG4eTD-VTUFNjIUEtl_czmO6RXo4iBbeFNEhIjUuTWA6Kjgd6tzqlB1QKHzrRNnJas0kjLQHEtt7JZpbQ6ccSC4388cNvPwidUJsZqGwqp9wTVJxvQFbrCJF0YcaPkUKZTE-eqO4-ezTyR3SwDbm3jEFsd1oHhyrIwyPr7BSp43iTisBxe7h1Rtv0gpY1iLVTXU0BvLKzbespzX_sz4ur90O2e8Ar2zGv9DKYEEd3JL34FptU37wLMYABW_eUMYFgSGmys0YBM8LjbEHV1cproZiJUTw\"\n}" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Cache-Control: + - no-cache, no-store, max-age=0, must-revalidate + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=utf-8 + Date: + - Mon, 28 Oct 2024 21:32:16 GMT + Expires: + - Mon, 01 Jan 1990 00:00:00 GMT + Pragma: + - no-cache + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/vertexai/vertexai_test.py b/tests/integrations/vertexai/vertexai_test.py index 258a2be07b7..fbbc27ef780 100644 --- a/tests/integrations/vertexai/vertexai_test.py +++ b/tests/integrations/vertexai/vertexai_test.py @@ -5,6 +5,10 @@ @pytest.mark.retry(max_attempts=5) @pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization"], + allowed_hosts=["api.wandb.ai", "localhost", 
"trace.wandb.ai"], +) def test_content_generation(client): import vertexai from vertexai.generative_models import GenerativeModel @@ -26,6 +30,10 @@ def test_content_generation(client): @pytest.mark.retry(max_attempts=5) @pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) def test_content_generation_stream(client): import vertexai from vertexai.generative_models import GenerativeModel @@ -52,6 +60,10 @@ def test_content_generation_stream(client): @pytest.mark.retry(max_attempts=5) @pytest.mark.asyncio @pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) async def test_content_generation_async(client): import vertexai from vertexai.generative_models import GenerativeModel @@ -74,6 +86,10 @@ async def test_content_generation_async(client): @pytest.mark.retry(max_attempts=5) @pytest.mark.asyncio @pytest.mark.skip_clickhouse_client +@pytest.mark.vcr( + filter_headers=["authorization"], + allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], +) async def test_content_generation_async_stream(client): import vertexai from vertexai.generative_models import GenerativeModel From 3f62601591f39f0d3eba3dde0e7c1c72fb30d7f6 Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Tue, 29 Oct 2024 03:09:55 +0530 Subject: [PATCH 09/16] fix: lint --- tests/integrations/vertexai/vertexai_test.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integrations/vertexai/vertexai_test.py b/tests/integrations/vertexai/vertexai_test.py index fbbc27ef780..943d844758b 100644 --- a/tests/integrations/vertexai/vertexai_test.py +++ b/tests/integrations/vertexai/vertexai_test.py @@ -40,9 +40,7 @@ def test_content_generation_stream(client): vertexai.init(project="wandb-growth", location="us-central1") model = 
GenerativeModel("gemini-1.5-flash") - response = model.generate_content( - "What is the capital of France?", stream=True - ) + response = model.generate_content("What is the capital of France?", stream=True) chunks = [chunk.text for chunk in response] assert len(chunks) > 1 From fe9d10ae54f3b97078dcc275194cfb2e0d963016 Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Wed, 30 Oct 2024 10:25:33 +0530 Subject: [PATCH 10/16] add: cassettes for async cases --- .../test_content_generation_async.yaml | 59 +++++++++++++++++++ .../test_content_generation_async_stream.yaml | 59 +++++++++++++++++++ tests/integrations/vertexai/vertexai_test.py | 12 ++-- 3 files changed, 124 insertions(+), 6 deletions(-) create mode 100644 tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async.yaml create mode 100644 tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async_stream.yaml diff --git a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async.yaml b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async.yaml new file mode 100644 index 00000000000..127cbfc7455 --- /dev/null +++ b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async.yaml @@ -0,0 +1,59 @@ +interactions: +- request: + body: grant_type=refresh_token&client_id=764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com&client_secret=d-FL95Q19q7MQmFpd7hHD0Ty&refresh_token=1%2F%2F0gLiyt963b8sOCgYIARAAGBASNwF-L9Ir7IWWSZUm-v0s2idDS4EYVX45KS8GZ0v9e0Ykjq8V8oMGXrcCohA6WFBUlBNSpkdOH_Y + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '268' + Content-Type: + - application/x-www-form-urlencoded + User-Agent: + - python-requests/2.32.3 + x-goog-api-client: + - gl-python/3.9.12 auth/2.35.0 cred-type/u + method: POST + uri: https://oauth2.googleapis.com/token + response: + body: + 
string: "{\n \"access_token\": \"ya29.a0AeDClZBIaRvWVOj-lnnQHc0YShBfhL67Y57maRDl9ZDGGOpyq7xDxGtTXDxIBo1SI559lJuE797LzI2n0xz-uZhq7xr4FxVzP_WtRggMybnpeLyg7FgrZ3Gyy2k9yuc407HKvsWj26DMoycri2BnMtaLsoeqsaEwKh5P4wZ96AaCgYKAcESARESFQHGX2Milar5gawtvBtJf_wx1jn0qA0177\",\n + \ \"expires_in\": 3599,\n \"scope\": \"https://www.googleapis.com/auth/cloud-platform + openid https://www.googleapis.com/auth/sqlservice.login https://www.googleapis.com/auth/userinfo.email\",\n + \ \"token_type\": \"Bearer\",\n \"id_token\": \"eyJhbGciOiJSUzI1NiIsImtpZCI6ImM4OGQ4MDlmNGRiOTQzZGY1M2RhN2FjY2ZkNDc3NjRkMDViYTM5MWYiLCJ0eXAiOiJKV1QifQ.eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhenAiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJhdWQiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJzdWIiOiIxMTI5OTYyMDUzOTczMDQ4ODIzNTciLCJoZCI6IndhbmRiLmNvbSIsImVtYWlsIjoic291bWlrLnJha3NoaXRAd2FuZGIuY29tIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsImF0X2hhc2giOiJ3b1FiM29pWm1qbEdqb0dkMlV4cExnIiwiaWF0IjoxNzMwMjY0MDIxLCJleHAiOjE3MzAyNjc2MjF9.T7iii2t706z62Wwgbc9jjaPpiacYIK3_pITHSjxbxpJT0ckAI0Ah3VsHSpwgWehlXb1_qnmNYQFIBOsd2iiWN_wuCVGWaHUZOlQPxBrzvB15qyIGQ5WR4QnDa8t29Mqb8wnukPuvUxqBfol5PDTcjT2p3SP22Kk5dtZ3OULDEsi_qx2CVv-jmG149pjIelZlKAWiQMjXZTQyhl4Yl8JaqCOOUSN7g-ePec3pC0G_ceoH1IazKAOWnlBWnv9lmssMQJgNX884qiKtP1HLbpnWHqX68wDPHyFZK6Ur8vJWaFt5C085a3dbXG_4lBeQlb9cWMSl6hL_TQP4caUsNtjTkQ\"\n}" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Cache-Control: + - no-cache, no-store, max-age=0, must-revalidate + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=utf-8 + Date: + - Wed, 30 Oct 2024 04:53:41 GMT + Expires: + - Mon, 01 Jan 1990 00:00:00 GMT + Pragma: + - no-cache + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + 
X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async_stream.yaml b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async_stream.yaml new file mode 100644 index 00000000000..60ef5410cd3 --- /dev/null +++ b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async_stream.yaml @@ -0,0 +1,59 @@ +interactions: +- request: + body: grant_type=refresh_token&client_id=764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com&client_secret=d-FL95Q19q7MQmFpd7hHD0Ty&refresh_token=1%2F%2F0gLiyt963b8sOCgYIARAAGBASNwF-L9Ir7IWWSZUm-v0s2idDS4EYVX45KS8GZ0v9e0Ykjq8V8oMGXrcCohA6WFBUlBNSpkdOH_Y + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, br, zstd + Connection: + - keep-alive + Content-Length: + - '268' + Content-Type: + - application/x-www-form-urlencoded + User-Agent: + - python-requests/2.32.3 + x-goog-api-client: + - gl-python/3.9.12 auth/2.35.0 cred-type/u + method: POST + uri: https://oauth2.googleapis.com/token + response: + body: + string: "{\n \"access_token\": \"ya29.a0AeDClZB3Y3dmQNSdSh1JquBqC6YlhbgHqCix4civq2GYwDk8SppuSx853hJSZzBqW0kI_Lq1lqnxcE7EgGoqF3tZS9EGK8V8esMork6ii_qx__C4ac8V7Jobm_C7t9lmvSbmqED1OO1A887bxSFQf8wKS-JGoHCqOnvXa1Xj0gaCgYKAWoSARESFQHGX2MiE1fTNI5Pq9FRb6hkD7SIfg0177\",\n + \ \"expires_in\": 3599,\n \"scope\": \"https://www.googleapis.com/auth/sqlservice.login + https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/cloud-platform + openid\",\n \"token_type\": \"Bearer\",\n \"id_token\": 
\"eyJhbGciOiJSUzI1NiIsImtpZCI6ImM4OGQ4MDlmNGRiOTQzZGY1M2RhN2FjY2ZkNDc3NjRkMDViYTM5MWYiLCJ0eXAiOiJKV1QifQ.eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhenAiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJhdWQiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJzdWIiOiIxMTI5OTYyMDUzOTczMDQ4ODIzNTciLCJoZCI6IndhbmRiLmNvbSIsImVtYWlsIjoic291bWlrLnJha3NoaXRAd2FuZGIuY29tIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsImF0X2hhc2giOiJXVXBVQjZLaEJZSnJ2ZGdTa2hUSE1nIiwiaWF0IjoxNzMwMjY0MDc1LCJleHAiOjE3MzAyNjc2NzV9.si22Fdiud05WQglgEx6TLCFz7WLhnyFhLszlm6-G1wa4roteWRQufet4GIXhUXjDdWGLoMP27SffPXFoJ6tT4j4BHWnh7bndM2zWCtMAtub2fDkL7wBIwjarI3Xe8mCo9Zqv0FgDTGbxqIQJ33FKBr2d99gfYGJ6dYQznyy6OonuHEUYv_q8XD5s9oMDJ3ejpLDyGhx4pOI7RIiKSlyiszrCAtJdzqFZvKci8Ak1xfOCmrCN74nGOdD8PobDgaT7ZDkfqGLs7OXNuW-6S0hNNRSFadG3Zi9TTakxlcYeojlZnpzxuS9uaObAp8oDDuZ68TC4c0ajMjmLblfXlKfI_w\"\n}" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Cache-Control: + - no-cache, no-store, max-age=0, must-revalidate + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=utf-8 + Date: + - Wed, 30 Oct 2024 04:54:35 GMT + Expires: + - Mon, 01 Jan 1990 00:00:00 GMT + Pragma: + - no-cache + Server: + - scaffolding on HTTPServer2 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/tests/integrations/vertexai/vertexai_test.py b/tests/integrations/vertexai/vertexai_test.py index 943d844758b..852a746e05f 100644 --- a/tests/integrations/vertexai/vertexai_test.py +++ b/tests/integrations/vertexai/vertexai_test.py @@ -1,3 +1,5 @@ +import asyncio + import pytest from weave.integrations.integration_utilities import op_name_from_ref @@ -56,19 +58,18 @@ def test_content_generation_stream(client): 
@pytest.mark.retry(max_attempts=5) -@pytest.mark.asyncio @pytest.mark.skip_clickhouse_client @pytest.mark.vcr( filter_headers=["authorization"], allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], ) -async def test_content_generation_async(client): +def test_content_generation_async(client): import vertexai from vertexai.generative_models import GenerativeModel vertexai.init(project="wandb-growth", location="us-central1") model = GenerativeModel("gemini-1.5-flash") - _ = await model.generate_content_async("What is the capital of France?") + asyncio.run(model.generate_content_async("What is the capital of France?")) calls = list(client.calls()) assert len(calls) == 1 @@ -82,13 +83,12 @@ async def test_content_generation_async(client): @pytest.mark.retry(max_attempts=5) -@pytest.mark.asyncio @pytest.mark.skip_clickhouse_client @pytest.mark.vcr( filter_headers=["authorization"], allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], ) -async def test_content_generation_async_stream(client): +def test_content_generation_async_stream(client): import vertexai from vertexai.generative_models import GenerativeModel @@ -104,7 +104,7 @@ async def get_response(): chunks.append(chunk.text) return chunks - _ = await get_response() + asyncio.run(get_response()) calls = list(client.calls()) assert len(calls) == 1 From c6a1274ea81dfcc8c86f5cdd1d26fa650b34abcf Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Tue, 5 Nov 2024 02:38:16 +0530 Subject: [PATCH 11/16] update: tests --- .../test_content_generation.yaml | 59 ------------------- .../test_content_generation_async.yaml | 59 ------------------- .../test_content_generation_async_stream.yaml | 59 ------------------- .../test_content_generation_stream.yaml | 59 ------------------- tests/integrations/vertexai/vertexai_test.py | 26 ++------ 5 files changed, 6 insertions(+), 256 deletions(-) delete mode 100644 
tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation.yaml delete mode 100644 tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async.yaml delete mode 100644 tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async_stream.yaml delete mode 100644 tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_stream.yaml diff --git a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation.yaml b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation.yaml deleted file mode 100644 index b97e1b3ea3d..00000000000 --- a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation.yaml +++ /dev/null @@ -1,59 +0,0 @@ -interactions: -- request: - body: grant_type=refresh_token&client_id=764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com&client_secret=d-FL95Q19q7MQmFpd7hHD0Ty&refresh_token=1%2F%2F0gLiyt963b8sOCgYIARAAGBASNwF-L9Ir7IWWSZUm-v0s2idDS4EYVX45KS8GZ0v9e0Ykjq8V8oMGXrcCohA6WFBUlBNSpkdOH_Y - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate, br, zstd - Connection: - - keep-alive - Content-Length: - - '268' - Content-Type: - - application/x-www-form-urlencoded - User-Agent: - - python-requests/2.32.3 - x-goog-api-client: - - gl-python/3.9.12 auth/2.35.0 cred-type/u - method: POST - uri: https://oauth2.googleapis.com/token - response: - body: - string: "{\n \"access_token\": \"ya29.a0AeDClZBelAx2FDCO9JlblsGuPjsHSW5rx4MyF9nZPH7RCoRnxTLfkPNT-cdst5JH9wJ9KQ7mEENoL6oh9v9Jlnu5ood7_BiZc_jr2xiooG_cFn46FpoRt527-fIgAVNK0ZrRR_nwQisqLlM8C5UHfbN8flTVnzPChcbDT1hJlwaCgYKAfUSARESFQHGX2Mi0EYI27EDFa1piqKlnYadLg0177\",\n - \ \"expires_in\": 3599,\n \"scope\": \"https://www.googleapis.com/auth/userinfo.email - https://www.googleapis.com/auth/sqlservice.login openid https://www.googleapis.com/auth/cloud-platform\",\n - \ \"token_type\": \"Bearer\",\n \"id_token\": 
\"eyJhbGciOiJSUzI1NiIsImtpZCI6ImM4OGQ4MDlmNGRiOTQzZGY1M2RhN2FjY2ZkNDc3NjRkMDViYTM5MWYiLCJ0eXAiOiJKV1QifQ.eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhenAiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJhdWQiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJzdWIiOiIxMTI5OTYyMDUzOTczMDQ4ODIzNTciLCJoZCI6IndhbmRiLmNvbSIsImVtYWlsIjoic291bWlrLnJha3NoaXRAd2FuZGIuY29tIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsImF0X2hhc2giOiJ5MlVJWXZkVmhhSXBFQldGdWFhSGRRIiwiaWF0IjoxNzMwMTUxMDc0LCJleHAiOjE3MzAxNTQ2NzR9.RdVJ0QIyEt3YgpCwzQaIy-yhAfsd5wgm5lQvyPowbRRU39hEIydyVzR2UokywCf_k6m8xG7Kn6E66EifgxK-4Po088aHvhf7_yao1PCRDxKsLNpn0bHLJq5ymVvndKJMkvndySrBM2eByY7vTTbQU5emusvFK9XZmp2l4HBRNIpt8a_6vXQiIjBnjxaCGArVeRaZ3AViwN8J0Bwy5QdPZGvKDjVZiGlb7lTPHedQ-Lo4eUXuW83xUDW2WgeCx-e-GKg_oCeGd0rwE18Pz4Sm17y-SUQKMMrdwB8HGD_VpK8TKmoEsOr8n3t2-G31ua0wmG9qMlTzYrY8f3Kn8C-oVQ\"\n}" - headers: - Alt-Svc: - - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 - Cache-Control: - - no-cache, no-store, max-age=0, must-revalidate - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=utf-8 - Date: - - Mon, 28 Oct 2024 21:31:14 GMT - Expires: - - Mon, 01 Jan 1990 00:00:00 GMT - Pragma: - - no-cache - Server: - - scaffolding on HTTPServer2 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async.yaml b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async.yaml deleted file mode 100644 index 127cbfc7455..00000000000 --- a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async.yaml +++ /dev/null @@ -1,59 +0,0 @@ -interactions: -- request: - body: 
grant_type=refresh_token&client_id=764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com&client_secret=d-FL95Q19q7MQmFpd7hHD0Ty&refresh_token=1%2F%2F0gLiyt963b8sOCgYIARAAGBASNwF-L9Ir7IWWSZUm-v0s2idDS4EYVX45KS8GZ0v9e0Ykjq8V8oMGXrcCohA6WFBUlBNSpkdOH_Y - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate, br, zstd - Connection: - - keep-alive - Content-Length: - - '268' - Content-Type: - - application/x-www-form-urlencoded - User-Agent: - - python-requests/2.32.3 - x-goog-api-client: - - gl-python/3.9.12 auth/2.35.0 cred-type/u - method: POST - uri: https://oauth2.googleapis.com/token - response: - body: - string: "{\n \"access_token\": \"ya29.a0AeDClZBIaRvWVOj-lnnQHc0YShBfhL67Y57maRDl9ZDGGOpyq7xDxGtTXDxIBo1SI559lJuE797LzI2n0xz-uZhq7xr4FxVzP_WtRggMybnpeLyg7FgrZ3Gyy2k9yuc407HKvsWj26DMoycri2BnMtaLsoeqsaEwKh5P4wZ96AaCgYKAcESARESFQHGX2Milar5gawtvBtJf_wx1jn0qA0177\",\n - \ \"expires_in\": 3599,\n \"scope\": \"https://www.googleapis.com/auth/cloud-platform - openid https://www.googleapis.com/auth/sqlservice.login https://www.googleapis.com/auth/userinfo.email\",\n - \ \"token_type\": \"Bearer\",\n \"id_token\": 
\"eyJhbGciOiJSUzI1NiIsImtpZCI6ImM4OGQ4MDlmNGRiOTQzZGY1M2RhN2FjY2ZkNDc3NjRkMDViYTM5MWYiLCJ0eXAiOiJKV1QifQ.eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhenAiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJhdWQiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJzdWIiOiIxMTI5OTYyMDUzOTczMDQ4ODIzNTciLCJoZCI6IndhbmRiLmNvbSIsImVtYWlsIjoic291bWlrLnJha3NoaXRAd2FuZGIuY29tIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsImF0X2hhc2giOiJ3b1FiM29pWm1qbEdqb0dkMlV4cExnIiwiaWF0IjoxNzMwMjY0MDIxLCJleHAiOjE3MzAyNjc2MjF9.T7iii2t706z62Wwgbc9jjaPpiacYIK3_pITHSjxbxpJT0ckAI0Ah3VsHSpwgWehlXb1_qnmNYQFIBOsd2iiWN_wuCVGWaHUZOlQPxBrzvB15qyIGQ5WR4QnDa8t29Mqb8wnukPuvUxqBfol5PDTcjT2p3SP22Kk5dtZ3OULDEsi_qx2CVv-jmG149pjIelZlKAWiQMjXZTQyhl4Yl8JaqCOOUSN7g-ePec3pC0G_ceoH1IazKAOWnlBWnv9lmssMQJgNX884qiKtP1HLbpnWHqX68wDPHyFZK6Ur8vJWaFt5C085a3dbXG_4lBeQlb9cWMSl6hL_TQP4caUsNtjTkQ\"\n}" - headers: - Alt-Svc: - - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 - Cache-Control: - - no-cache, no-store, max-age=0, must-revalidate - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=utf-8 - Date: - - Wed, 30 Oct 2024 04:53:41 GMT - Expires: - - Mon, 01 Jan 1990 00:00:00 GMT - Pragma: - - no-cache - Server: - - scaffolding on HTTPServer2 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async_stream.yaml b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async_stream.yaml deleted file mode 100644 index 60ef5410cd3..00000000000 --- a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_async_stream.yaml +++ /dev/null @@ -1,59 +0,0 @@ -interactions: -- request: - body: 
grant_type=refresh_token&client_id=764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com&client_secret=d-FL95Q19q7MQmFpd7hHD0Ty&refresh_token=1%2F%2F0gLiyt963b8sOCgYIARAAGBASNwF-L9Ir7IWWSZUm-v0s2idDS4EYVX45KS8GZ0v9e0Ykjq8V8oMGXrcCohA6WFBUlBNSpkdOH_Y - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate, br, zstd - Connection: - - keep-alive - Content-Length: - - '268' - Content-Type: - - application/x-www-form-urlencoded - User-Agent: - - python-requests/2.32.3 - x-goog-api-client: - - gl-python/3.9.12 auth/2.35.0 cred-type/u - method: POST - uri: https://oauth2.googleapis.com/token - response: - body: - string: "{\n \"access_token\": \"ya29.a0AeDClZB3Y3dmQNSdSh1JquBqC6YlhbgHqCix4civq2GYwDk8SppuSx853hJSZzBqW0kI_Lq1lqnxcE7EgGoqF3tZS9EGK8V8esMork6ii_qx__C4ac8V7Jobm_C7t9lmvSbmqED1OO1A887bxSFQf8wKS-JGoHCqOnvXa1Xj0gaCgYKAWoSARESFQHGX2MiE1fTNI5Pq9FRb6hkD7SIfg0177\",\n - \ \"expires_in\": 3599,\n \"scope\": \"https://www.googleapis.com/auth/sqlservice.login - https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/cloud-platform - openid\",\n \"token_type\": \"Bearer\",\n \"id_token\": 
\"eyJhbGciOiJSUzI1NiIsImtpZCI6ImM4OGQ4MDlmNGRiOTQzZGY1M2RhN2FjY2ZkNDc3NjRkMDViYTM5MWYiLCJ0eXAiOiJKV1QifQ.eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhenAiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJhdWQiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJzdWIiOiIxMTI5OTYyMDUzOTczMDQ4ODIzNTciLCJoZCI6IndhbmRiLmNvbSIsImVtYWlsIjoic291bWlrLnJha3NoaXRAd2FuZGIuY29tIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsImF0X2hhc2giOiJXVXBVQjZLaEJZSnJ2ZGdTa2hUSE1nIiwiaWF0IjoxNzMwMjY0MDc1LCJleHAiOjE3MzAyNjc2NzV9.si22Fdiud05WQglgEx6TLCFz7WLhnyFhLszlm6-G1wa4roteWRQufet4GIXhUXjDdWGLoMP27SffPXFoJ6tT4j4BHWnh7bndM2zWCtMAtub2fDkL7wBIwjarI3Xe8mCo9Zqv0FgDTGbxqIQJ33FKBr2d99gfYGJ6dYQznyy6OonuHEUYv_q8XD5s9oMDJ3ejpLDyGhx4pOI7RIiKSlyiszrCAtJdzqFZvKci8Ak1xfOCmrCN74nGOdD8PobDgaT7ZDkfqGLs7OXNuW-6S0hNNRSFadG3Zi9TTakxlcYeojlZnpzxuS9uaObAp8oDDuZ68TC4c0ajMjmLblfXlKfI_w\"\n}" - headers: - Alt-Svc: - - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 - Cache-Control: - - no-cache, no-store, max-age=0, must-revalidate - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=utf-8 - Date: - - Wed, 30 Oct 2024 04:54:35 GMT - Expires: - - Mon, 01 Jan 1990 00:00:00 GMT - Pragma: - - no-cache - Server: - - scaffolding on HTTPServer2 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_stream.yaml b/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_stream.yaml deleted file mode 100644 index da514f00c69..00000000000 --- a/tests/integrations/vertexai/cassettes/vertexai_test/test_content_generation_stream.yaml +++ /dev/null @@ -1,59 +0,0 @@ -interactions: -- request: - body: 
grant_type=refresh_token&client_id=764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com&client_secret=d-FL95Q19q7MQmFpd7hHD0Ty&refresh_token=1%2F%2F0gLiyt963b8sOCgYIARAAGBASNwF-L9Ir7IWWSZUm-v0s2idDS4EYVX45KS8GZ0v9e0Ykjq8V8oMGXrcCohA6WFBUlBNSpkdOH_Y - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate, br, zstd - Connection: - - keep-alive - Content-Length: - - '268' - Content-Type: - - application/x-www-form-urlencoded - User-Agent: - - python-requests/2.32.3 - x-goog-api-client: - - gl-python/3.9.12 auth/2.35.0 cred-type/u - method: POST - uri: https://oauth2.googleapis.com/token - response: - body: - string: "{\n \"access_token\": \"ya29.a0AeDClZDPI3uAyf9H7JW4UIb9JW_kMHCCJqkDGc6w8roBW619IiOJ1UzyUieWpy1ayyRGyTwaXEIDYABTxwIlSjfWwcsRrfbidnkqMBCAy0XWgMfdnMfRUy4P_nqFCLz-dsfUYXXhCm8i8ZyfvUiaNF506xoNDCt02UsjLqFJhAaCgYKATESARESFQHGX2MiJcXnY1oyo2KG_7hFtnjQoA0177\",\n - \ \"expires_in\": 3599,\n \"scope\": \"https://www.googleapis.com/auth/cloud-platform - https://www.googleapis.com/auth/userinfo.email openid https://www.googleapis.com/auth/sqlservice.login\",\n - \ \"token_type\": \"Bearer\",\n \"id_token\": 
\"eyJhbGciOiJSUzI1NiIsImtpZCI6ImM4OGQ4MDlmNGRiOTQzZGY1M2RhN2FjY2ZkNDc3NjRkMDViYTM5MWYiLCJ0eXAiOiJKV1QifQ.eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhenAiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJhdWQiOiI3NjQwODYwNTE4NTAtNnFyNHA2Z3BpNmhuNTA2cHQ4ZWp1cTgzZGkzNDFodXIuYXBwcy5nb29nbGV1c2VyY29udGVudC5jb20iLCJzdWIiOiIxMTI5OTYyMDUzOTczMDQ4ODIzNTciLCJoZCI6IndhbmRiLmNvbSIsImVtYWlsIjoic291bWlrLnJha3NoaXRAd2FuZGIuY29tIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsImF0X2hhc2giOiJFcTFRUXphQUMxNm9EZEhpTGZRX2tBIiwiaWF0IjoxNzMwMTUxMTM2LCJleHAiOjE3MzAxNTQ3MzZ9.GZoOYdP9uVjPQXkCgA5zgFbZ-ZcxmVZD5bhiKQ6T2sHKtACc0dh9hEOui1oG4eTD-VTUFNjIUEtl_czmO6RXo4iBbeFNEhIjUuTWA6Kjgd6tzqlB1QKHzrRNnJas0kjLQHEtt7JZpbQ6ccSC4388cNvPwidUJsZqGwqp9wTVJxvQFbrCJF0YcaPkUKZTE-eqO4-ezTyR3SwDbm3jEFsd1oHhyrIwyPr7BSp43iTisBxe7h1Rtv0gpY1iLVTXU0BvLKzbespzX_sz4ur90O2e8Ar2zGv9DKYEEd3JL34FptU37wLMYABW_eUMYFgSGmys0YBM8LjbEHV1cproZiJUTw\"\n}" - headers: - Alt-Svc: - - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 - Cache-Control: - - no-cache, no-store, max-age=0, must-revalidate - Content-Encoding: - - gzip - Content-Type: - - application/json; charset=utf-8 - Date: - - Mon, 28 Oct 2024 21:32:16 GMT - Expires: - - Mon, 01 Jan 1990 00:00:00 GMT - Pragma: - - no-cache - Server: - - scaffolding on HTTPServer2 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - SAMEORIGIN - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -version: 1 diff --git a/tests/integrations/vertexai/vertexai_test.py b/tests/integrations/vertexai/vertexai_test.py index 852a746e05f..de705b8dbc5 100644 --- a/tests/integrations/vertexai/vertexai_test.py +++ b/tests/integrations/vertexai/vertexai_test.py @@ -7,10 +7,6 @@ @pytest.mark.retry(max_attempts=5) @pytest.mark.skip_clickhouse_client -@pytest.mark.vcr( - filter_headers=["authorization"], - allowed_hosts=["api.wandb.ai", "localhost", 
"trace.wandb.ai"], -) def test_content_generation(client): import vertexai from vertexai.generative_models import GenerativeModel @@ -32,10 +28,6 @@ def test_content_generation(client): @pytest.mark.retry(max_attempts=5) @pytest.mark.skip_clickhouse_client -@pytest.mark.vcr( - filter_headers=["authorization"], - allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], -) def test_content_generation_stream(client): import vertexai from vertexai.generative_models import GenerativeModel @@ -58,18 +50,15 @@ def test_content_generation_stream(client): @pytest.mark.retry(max_attempts=5) +@pytest.mark.asyncio @pytest.mark.skip_clickhouse_client -@pytest.mark.vcr( - filter_headers=["authorization"], - allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], -) -def test_content_generation_async(client): +async def test_content_generation_async(client): import vertexai from vertexai.generative_models import GenerativeModel vertexai.init(project="wandb-growth", location="us-central1") model = GenerativeModel("gemini-1.5-flash") - asyncio.run(model.generate_content_async("What is the capital of France?")) + await model.generate_content_async("What is the capital of France?") calls = list(client.calls()) assert len(calls) == 1 @@ -83,12 +72,9 @@ def test_content_generation_async(client): @pytest.mark.retry(max_attempts=5) +@pytest.mark.asyncio @pytest.mark.skip_clickhouse_client -@pytest.mark.vcr( - filter_headers=["authorization"], - allowed_hosts=["api.wandb.ai", "localhost", "trace.wandb.ai"], -) -def test_content_generation_async_stream(client): +async def test_content_generation_async_stream(client): import vertexai from vertexai.generative_models import GenerativeModel @@ -104,7 +90,7 @@ async def get_response(): chunks.append(chunk.text) return chunks - asyncio.run(get_response()) + await get_response() calls = list(client.calls()) assert len(calls) == 1 From 7ae4ff3be2caf89113b89cedafd5cd5c3b74f317 Mon Sep 17 00:00:00 2001 From: Soumik Rakshit 
<19soumik.rakshit96@gmail.com> Date: Tue, 5 Nov 2024 02:42:42 +0530 Subject: [PATCH 12/16] update: docs --- .../docs/guides/integrations/google-gemini.md | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/docs/docs/guides/integrations/google-gemini.md b/docs/docs/guides/integrations/google-gemini.md index 351fb1247e7..6afc2790b3d 100644 --- a/docs/docs/guides/integrations/google-gemini.md +++ b/docs/docs/guides/integrations/google-gemini.md @@ -16,13 +16,28 @@ import os import google.generativeai as genai import weave -weave.init(project_name="google_ai_studio-test") +weave.init(project_name="google-ai-studio-test") genai.configure(api_key=os.environ["GOOGLE_API_KEY"]) model = genai.GenerativeModel("gemini-1.5-flash") response = model.generate_content("Write a story about an AI and magic") ``` +Weave will also automatically capture traces for [Vertex APIs](https://cloud.google.com/vertex-ai/docs). To start tracking, call `weave.init(project_name="")` and use the library as normal. + +```python +import vertexai +import weave +from vertexai.generative_models import GenerativeModel + +weave.init(project_name="vertex-ai-test") +vertexai.init(project="", location="") +model = GenerativeModel("gemini-1.5-flash-002") +response = model.generate_content( + "What's a good name for a flower shop specialising in selling dried flower bouquets?" +) +``` + ## Track your own ops Wrapping a function with `@weave.op` starts capturing inputs, outputs and app logic so you can debug how data flows through your app. You can deeply nest ops and build a tree of functions that you want to track. This also starts automatically versioning code as you experiment to capture ad-hoc details that haven't been committed to git. 
@@ -97,11 +112,3 @@ Given a weave reference to any `weave.Model` object, you can spin up a fastapi s ```shell weave serve weave:///your_entity/project-name/YourModel: ``` - -## Vertex API - -Full Weave support for the `Vertex AI SDK` python package is currently in development, however there is a way you can integrate Weave with the Vertex API. - -Vertex API supports OpenAI SDK compatibility ([docs](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/call-gemini-using-openai-library)), and if this is a way you build your application, Weave will automatically track your LLM calls via our [OpenAI](/guides/integrations/openai) SDK integration. - -\* Please note that some features may not fully work as Vertex API doesn't implement the full OpenAI SDK capabilities. From 65c4cb3d6e351c44c9338db2e21ffd08a3670540 Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Tue, 5 Nov 2024 02:45:03 +0530 Subject: [PATCH 13/16] fix: lint --- tests/integrations/vertexai/vertexai_test.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/integrations/vertexai/vertexai_test.py b/tests/integrations/vertexai/vertexai_test.py index de705b8dbc5..1cf00642db1 100644 --- a/tests/integrations/vertexai/vertexai_test.py +++ b/tests/integrations/vertexai/vertexai_test.py @@ -1,5 +1,3 @@ -import asyncio - import pytest from weave.integrations.integration_utilities import op_name_from_ref From e137602d3c09c5e8a4b97fd735ad1e852727350e Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Thu, 7 Nov 2024 22:33:47 +0530 Subject: [PATCH 14/16] add: dictify support --- tests/integrations/vertexai/vertexai_test.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/tests/integrations/vertexai/vertexai_test.py b/tests/integrations/vertexai/vertexai_test.py index 1cf00642db1..de8bde04dfe 100644 --- a/tests/integrations/vertexai/vertexai_test.py +++ b/tests/integrations/vertexai/vertexai_test.py 
@@ -21,7 +21,11 @@ def test_content_generation(client): trace_name = op_name_from_ref(call.op_name) assert trace_name == "vertexai.GenerativeModel.generate_content" - assert "paris" in str(call.output).lower() + output = call.output + assert "paris" in output["candidates"][0]["content"]["parts"][0]["text"].lower() + assert output["candidates"][0]["content"]["role"] == "model" + assert output["candidates"][0]["finish_reason"] == "STOP" + assert "gemini-1.5-flash" in output["model_version"] @pytest.mark.retry(max_attempts=5) @@ -44,7 +48,9 @@ def test_content_generation_stream(client): trace_name = op_name_from_ref(call.op_name) assert trace_name == "vertexai.GenerativeModel.generate_content" - assert "paris" in str(call.output).lower() + output = call.output + assert "paris" in output["candidates"][0]["content"]["parts"][0]["text"].lower() + assert output["candidates"][0]["content"]["role"] == "model" @pytest.mark.retry(max_attempts=5) @@ -66,7 +72,11 @@ async def test_content_generation_async(client): trace_name = op_name_from_ref(call.op_name) assert trace_name == "vertexai.GenerativeModel.generate_content_async" - assert "paris" in str(call.output).lower() + output = call.output + assert "paris" in output["candidates"][0]["content"]["parts"][0]["text"].lower() + assert output["candidates"][0]["content"]["role"] == "model" + assert output["candidates"][0]["finish_reason"] == "STOP" + assert "gemini-1.5-flash" in output["model_version"] @pytest.mark.retry(max_attempts=5) @@ -98,4 +108,6 @@ async def get_response(): trace_name = op_name_from_ref(call.op_name) assert trace_name == "vertexai.GenerativeModel.generate_content_async" - assert "paris" in str(call.output).lower() + output = call.output + assert "paris" in output["candidates"][0]["content"]["parts"][0]["text"].lower() + assert output["candidates"][0]["content"]["role"] == "model" From 00b16571a90ec09f96a261dcb1ccfd8a61d9469b Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: 
Fri, 8 Nov 2024 02:12:22 +0530 Subject: [PATCH 15/16] update: tests --- tests/integrations/vertexai/vertexai_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integrations/vertexai/vertexai_test.py b/tests/integrations/vertexai/vertexai_test.py index de8bde04dfe..82bc7a19f4f 100644 --- a/tests/integrations/vertexai/vertexai_test.py +++ b/tests/integrations/vertexai/vertexai_test.py @@ -3,7 +3,7 @@ from weave.integrations.integration_utilities import op_name_from_ref -@pytest.mark.retry(max_attempts=5) +@pytest.mark.flaky(reruns=5, reruns_delay=2) @pytest.mark.skip_clickhouse_client def test_content_generation(client): import vertexai @@ -28,7 +28,7 @@ def test_content_generation(client): assert "gemini-1.5-flash" in output["model_version"] -@pytest.mark.retry(max_attempts=5) +@pytest.mark.flaky(reruns=5, reruns_delay=2) @pytest.mark.skip_clickhouse_client def test_content_generation_stream(client): import vertexai @@ -53,7 +53,7 @@ def test_content_generation_stream(client): assert output["candidates"][0]["content"]["role"] == "model" -@pytest.mark.retry(max_attempts=5) +@pytest.mark.flaky(reruns=5, reruns_delay=2) @pytest.mark.asyncio @pytest.mark.skip_clickhouse_client async def test_content_generation_async(client): @@ -79,7 +79,7 @@ async def test_content_generation_async(client): assert "gemini-1.5-flash" in output["model_version"] -@pytest.mark.retry(max_attempts=5) +@pytest.mark.flaky(reruns=5, reruns_delay=2) @pytest.mark.asyncio @pytest.mark.skip_clickhouse_client async def test_content_generation_async_stream(client): From f256ec4e8d3a3260d38ced955850ad928a239d08 Mon Sep 17 00:00:00 2001 From: Soumik Rakshit <19soumik.rakshit96@gmail.com> Date: Fri, 22 Nov 2024 12:07:34 +0530 Subject: [PATCH 16/16] add: skips to vertexai tests --- tests/integrations/vertexai/vertexai_test.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/integrations/vertexai/vertexai_test.py 
b/tests/integrations/vertexai/vertexai_test.py index 82bc7a19f4f..4af39e62e32 100644 --- a/tests/integrations/vertexai/vertexai_test.py +++ b/tests/integrations/vertexai/vertexai_test.py @@ -3,6 +3,9 @@ from weave.integrations.integration_utilities import op_name_from_ref +@pytest.mark.skip( + reason="This test depends on a non-deterministic external service provider" +) @pytest.mark.flaky(reruns=5, reruns_delay=2) @pytest.mark.skip_clickhouse_client def test_content_generation(client): @@ -28,6 +31,9 @@ def test_content_generation(client): assert "gemini-1.5-flash" in output["model_version"] +@pytest.mark.skip( + reason="This test depends on a non-deterministic external service provider" +) @pytest.mark.flaky(reruns=5, reruns_delay=2) @pytest.mark.skip_clickhouse_client def test_content_generation_stream(client): @@ -53,6 +59,9 @@ def test_content_generation_stream(client): assert output["candidates"][0]["content"]["role"] == "model" +@pytest.mark.skip( + reason="This test depends on a non-deterministic external service provider" +) @pytest.mark.flaky(reruns=5, reruns_delay=2) @pytest.mark.asyncio @pytest.mark.skip_clickhouse_client @@ -79,6 +88,9 @@ async def test_content_generation_async(client): assert "gemini-1.5-flash" in output["model_version"] +@pytest.mark.skip( + reason="This test depends on a non-deterministic external service provider" +) @pytest.mark.flaky(reruns=5, reruns_delay=2) @pytest.mark.asyncio @pytest.mark.skip_clickhouse_client