From 9c119626467671b49ee0a670e6aac13772d6368a Mon Sep 17 00:00:00 2001
From: Abraham Leal <45460452+abraham-leal@users.noreply.github.com>
Date: Mon, 16 Dec 2024 19:12:32 -0600
Subject: [PATCH] fix nvidia title

---
 docs/docs/guides/integrations/nvidia_nim.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/docs/guides/integrations/nvidia_nim.md b/docs/docs/guides/integrations/nvidia_nim.md
index f8803ea076d..f909bbd0fa9 100644
--- a/docs/docs/guides/integrations/nvidia_nim.md
+++ b/docs/docs/guides/integrations/nvidia_nim.md
@@ -1,7 +1,7 @@
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
-# NVIDIA NIM Inference Microservices
+# NVIDIA NeMo Inference Microservices
 
 Weave automatically tracks and logs LLM calls made via the [ChatNVIDIA](https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/) library, after `weave.init()` is called.
 
@@ -39,7 +39,7 @@ It’s important to store traces of LLM applications in a central database, both
 
-[![chatnvidia_trace.png](imgs/chatnvidia_trace.png)]
+![chatnvidia_trace.png](imgs/chatnvidia_trace.png)
 
 ## Track your own ops
 
@@ -115,7 +115,7 @@ Navigate to Weave and you can click `get_pokemon_data` in the UI to see the inpu
 
-[![nvidia_pokedex.png](imgs/nvidia_pokedex.png)]
+![nvidia_pokedex.png](imgs/nvidia_pokedex.png)
 
 ## Create a `Model` for easier experimentation
 
@@ -168,7 +168,7 @@ Navigate to Weave and you can click `get_pokemon_data` in the UI to see the inpu
 
-[![chatnvidia_model.png](imgs/chatnvidia_model.png)](https://wandb.ai/_scott/grammar-openai/weave/calls)
+![chatnvidia_model.png](imgs/chatnvidia_model.png)
 
 ## Usage Info