diff --git a/examples/voice-assistant/llamaindex-rag/chat_engine.py b/examples/voice-pipeline-agent/llamaindex-rag/chat_engine.py
similarity index 95%
rename from examples/voice-assistant/llamaindex-rag/chat_engine.py
rename to examples/voice-pipeline-agent/llamaindex-rag/chat_engine.py
index 755df9d9b..bc7f4382c 100644
--- a/examples/voice-assistant/llamaindex-rag/chat_engine.py
+++ b/examples/voice-pipeline-agent/llamaindex-rag/chat_engine.py
@@ -2,7 +2,7 @@
 
 from dotenv import load_dotenv
 from livekit.agents import AutoSubscribe, JobContext, WorkerOptions, cli, llm
-from livekit.agents.voice_assistant import VoiceAssistant
+from livekit.agents.pipeline import VoicePipelineAgent
 from livekit.plugins import deepgram, llama_index, openai, silero
 from llama_index.core import (
     SimpleDirectoryReader,
@@ -41,7 +41,7 @@ async def entrypoint(ctx: JobContext):
 
     await ctx.connect(auto_subscribe=AutoSubscribe.AUDIO_ONLY)
 
-    assistant = VoiceAssistant(
+    assistant = VoicePipelineAgent(
         vad=silero.VAD.load(),
         stt=deepgram.STT(),
         llm=llama_index.LLM(chat_engine=chat_engine),
diff --git a/examples/voice-assistant/llamaindex-rag/data/raw_data.txt b/examples/voice-pipeline-agent/llamaindex-rag/data/raw_data.txt
similarity index 100%
rename from examples/voice-assistant/llamaindex-rag/data/raw_data.txt
rename to examples/voice-pipeline-agent/llamaindex-rag/data/raw_data.txt
diff --git a/examples/voice-assistant/llamaindex-rag/query_engine.py b/examples/voice-pipeline-agent/llamaindex-rag/query_engine.py
similarity index 95%
rename from examples/voice-assistant/llamaindex-rag/query_engine.py
rename to examples/voice-pipeline-agent/llamaindex-rag/query_engine.py
index 71841e3bf..bb7605dd8 100644
--- a/examples/voice-assistant/llamaindex-rag/query_engine.py
+++ b/examples/voice-pipeline-agent/llamaindex-rag/query_engine.py
@@ -2,7 +2,7 @@
 
 from dotenv import load_dotenv
 from livekit.agents import AutoSubscribe, JobContext, WorkerOptions, cli, llm
-from livekit.agents.voice_assistant import VoiceAssistant
+from livekit.agents.pipeline import VoicePipelineAgent
 from livekit.plugins import deepgram, openai, silero
 from llama_index.core import (
     SimpleDirectoryReader,
@@ -47,7 +47,7 @@ async def query_info(query: str) -> str:
         print("Query result:", res)
         return str(res)
 
-    assistant = VoiceAssistant(
+    assistant = VoicePipelineAgent(
         vad=silero.VAD.load(),
         stt=deepgram.STT(),
         llm=openai.LLM(),
diff --git a/examples/voice-assistant/llamaindex-rag/retrieval.py b/examples/voice-pipeline-agent/llamaindex-rag/retrieval.py
similarity index 93%
rename from examples/voice-assistant/llamaindex-rag/retrieval.py
rename to examples/voice-pipeline-agent/llamaindex-rag/retrieval.py
index a2d490b09..c2f412cde 100644
--- a/examples/voice-assistant/llamaindex-rag/retrieval.py
+++ b/examples/voice-pipeline-agent/llamaindex-rag/retrieval.py
@@ -2,7 +2,7 @@
 
 from dotenv import load_dotenv
 from livekit.agents import AutoSubscribe, JobContext, WorkerOptions, cli, llm
-from livekit.agents.voice_assistant import VoiceAssistant
+from livekit.agents.pipeline import VoicePipelineAgent
 from livekit.plugins import deepgram, openai, silero
 from llama_index.core import (
     SimpleDirectoryReader,
@@ -40,7 +40,7 @@ async def entrypoint(ctx: JobContext):
     initial_ctx.messages.append(system_msg)
 
     async def _will_synthesize_assistant_reply(
-        assistant: VoiceAssistant, chat_ctx: llm.ChatContext
+        assistant: VoicePipelineAgent, chat_ctx: llm.ChatContext
     ):
         ctx_msg = system_msg.copy()
         user_msg = chat_ctx.messages[-1]
@@ -57,7 +57,7 @@ async def _will_synthesize_assistant_reply(
 
     await ctx.connect(auto_subscribe=AutoSubscribe.AUDIO_ONLY)
 
-    assistant = VoiceAssistant(
+    assistant = VoicePipelineAgent(
         vad=silero.VAD.load(),
         stt=deepgram.STT(),
         llm=openai.LLM(),