Skip to content

Commit

Permalink
prepare for release (#1007)
Browse files Browse the repository at this point in the history
  • Loading branch information
theomonnom authored Oct 30, 2024
1 parent 8404cf3 commit 334ffd0
Show file tree
Hide file tree
Showing 17 changed files with 308 additions and 185 deletions.
6 changes: 6 additions & 0 deletions .changeset/wild-cougars-decide.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
"livekit-agents": minor
"livekit-plugins-llama-index": minor
---

prepare for release
28 changes: 24 additions & 4 deletions .github/workflows/check-types.yml → .github/workflows/ci.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: Check Types
name: CI

on:
push:
Expand All @@ -12,9 +12,29 @@ on:
workflow_dispatch:

jobs:
type-check:
ruff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4

- uses: actions/setup-python@v4
with:
python-version: "3.9"
cache: "pip"

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install ruff

- name: Ruff
run: ruff check --output-format=github .

- name: Check format
run: ruff format --check .

type-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
Expand Down Expand Up @@ -53,8 +73,7 @@ jobs:
types-psutil \
types-pyOpenSSL \
types-requests \
types-openpyxl \
types-requests
types-openpyxl

- name: Check Types
run: |
Expand All @@ -70,3 +89,4 @@ jobs:
-p livekit.plugins.rag \
-p livekit.plugins.azure \
-p livekit.plugins.anthropic

31 changes: 0 additions & 31 deletions .github/workflows/ruff.yml

This file was deleted.

59 changes: 44 additions & 15 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: Tests
name: tests

on:
push:
Expand All @@ -13,25 +13,34 @@ on:

jobs:
tests:
# don't run tests for PRs on forks
# Don't run tests for PRs on forks
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false
strategy:
fail-fast: false
matrix:
include:
# Base tests on all operating systems
- os: macos-14-large
platform: macos
test_group: base
- os: macos-14
platform: macos
test_group: base
- os: windows-2019
platform: windows
test_group: base
- os: ubuntu-20.04
platform: linux
test_group: base
- os: namespace-profile-default-arm64
platform: linux
test_group: base

# Run llm, stt, and tts tests only on Ubuntu
- os: ubuntu-20.04
test_group: llm
- os: ubuntu-20.04
test_group: stt
- os: ubuntu-20.04
test_group: tts

runs-on: ${{ matrix.os }}
name: Run tests (${{ matrix.os }})
name: ${{ matrix.test_group }} — ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
with:
Expand All @@ -50,14 +59,12 @@ jobs:
${{ runner.os }}-cache
- uses: actions/setup-python@v5
# on mac, ffmpeg installs python
if: ${{ matrix.os != 'macos' }}
with:
python-version: "3.9"
cache: "pip"

- name: Install ffmpeg (Linux)
if: ${{ matrix.platform == 'linux' }}
if: ${{ matrix.os == 'ubuntu-20.04' || matrix.os == 'namespace-profile-default-arm64' }}
run: sudo apt-get update && sudo apt-get install -y ffmpeg

# Azure plugin fails with OpenSSL3, and Ubuntu 22.04 does not include libssl1.1 in its repos
Expand All @@ -70,11 +77,11 @@ jobs:
sudo dpkg -i libssl-dev_1.1.1-1ubuntu2.1_arm64.deb
- name: Install ffmpeg (macOS)
if: ${{ matrix.platform == 'macos' }}
if: ${{ startsWith(matrix.os, 'macos') }}
run: brew install ffmpeg

- name: Install ffmpeg (Windows)
if: ${{ matrix.platform == 'windows' }}
if: ${{ matrix.os == 'windows-2019' }}
run: choco install ffmpeg

- name: Install packages
Expand Down Expand Up @@ -108,6 +115,28 @@ jobs:
GOOGLE_CREDENTIALS_JSON: ${{ secrets.GOOGLE_CREDENTIALS_JSON }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
GOOGLE_APPLICATION_CREDENTIALS: google.json
PYTEST_ADDOPTS: "--color=yes"
working-directory: tests
run: |
echo $GOOGLE_CREDENTIALS_JSON > google.json
pytest --asyncio-mode=auto --timeout=60 .
echo "$GOOGLE_CREDENTIALS_JSON" > google.json
case "${{ matrix.test_group }}" in
base)
test_files="test_aio.py test_tokenizer.py test_vad.py test_ipc.py"
;;
llm)
test_files="test_llm.py"
;;
stt)
test_files="test_stt.py"
;;
tts)
test_files="test_tts.py"
;;
*)
echo "Unknown test group: ${{ matrix.test_group }}"
exit 1
;;
esac
pytest --asyncio-mode=auto --timeout=60 $test_files
11 changes: 11 additions & 0 deletions examples/voice-pipeline-agent/llamaindex-rag/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# RAG Example using LlamaIndex

This repository showcases three ways to build a voice assistant with Retrieval-Augmented Generation (RAG) using LlamaIndex:

1. **`chat_engine.py`**: Utilizes LlamaIndex's `as_chat_engine` for a straightforward, integrated solution. **Trade-off**: Lacks function calling support, limiting advanced interactions.

2. **`query_engine.py`**: Uses an LLM that supports function calling (e.g., OpenAI's models) to define custom functions like `query_info` for retrieval. **Trade-off**: Requires additional setup but offers greater flexibility.

3. **`retrieval.py`**: Manually injects retrieved context into the system prompt using LlamaIndex's retriever. **Trade-off**: Provides fine-grained control but involves complex prompt engineering.

**Current recommended way**: Use **`query_engine.py`** for its balance of flexibility and control, enabling function calling and custom behaviors without excessive complexity.
3 changes: 1 addition & 2 deletions examples/voice-pipeline-agent/llamaindex-rag/chat_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,6 @@
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)

chat_engine = index.as_chat_engine(chat_mode=ChatMode.CONTEXT)


async def entrypoint(ctx: JobContext):
initial_ctx = llm.ChatContext().append(
Expand All @@ -38,6 +36,7 @@ async def entrypoint(ctx: JobContext):
"You should use short and concise responses, and avoiding usage of unpronouncable punctuation."
),
)
chat_engine = index.as_chat_engine(chat_mode=ChatMode.CONTEXT)

await ctx.connect(auto_subscribe=AutoSubscribe.AUDIO_ONLY)

Expand Down
13 changes: 9 additions & 4 deletions livekit-agents/livekit/agents/cli/log.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,14 @@
"watchfiles",
]


def _silence_noisy_loggers() -> None:
for noisy_logger in NOISY_LOGGERS:
logger = logging.getLogger(noisy_logger)
if logger.level == logging.NOTSET:
logger.setLevel(logging.WARN)


# skip default LogRecord attributes
# http://docs.python.org/library/logging.html#logrecord-attributes
_RESERVED_ATTRS: Tuple[str, ...] = (
Expand Down Expand Up @@ -209,10 +217,7 @@ def setup_logging(log_level: str, devmode: bool) -> None:
root.addHandler(handler)
root.setLevel(log_level)

for noisy_logger in NOISY_LOGGERS:
logger = logging.getLogger(noisy_logger)
if logger.level == logging.NOTSET:
logger.setLevel(logging.WARN)
_silence_noisy_loggers()

from ..log import logger

Expand Down
12 changes: 7 additions & 5 deletions livekit-agents/livekit/agents/pipeline/pipeline_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ class _ImplOptions:
int_speech_duration: float
int_min_words: int
min_endpointing_delay: float
max_recursive_fnc_calls: int
max_nested_fnc_calls: int
preemptive_synthesis: bool
before_llm_cb: BeforeLLMCallback
before_tts_cb: BeforeTTSCallback
Expand Down Expand Up @@ -161,7 +161,7 @@ def __init__(
interrupt_speech_duration: float = 0.5,
interrupt_min_words: int = 0,
min_endpointing_delay: float = 0.5,
max_recursive_fnc_calls: int = 1,
max_nested_fnc_calls: int = 1,
preemptive_synthesis: bool = False,
transcription: AgentTranscriptionOptions = AgentTranscriptionOptions(),
before_llm_cb: BeforeLLMCallback = _default_before_llm_cb,
Expand All @@ -186,6 +186,8 @@ def __init__(
interrupt_min_words: Minimum number of words to consider for interruption.
Defaults to 0 as this may increase the latency depending on the STT.
min_endpointing_delay: Delay to wait before considering the user finished speaking.
max_nested_fnc_calls: Maximum number of nested function calls allowed for chaining
function calls (e.g functions that depend on each other).
preemptive_synthesis: Whether to preemptively synthesize responses.
transcription: Options for assistant transcription.
before_llm_cb: Callback called when the assistant is about to synthesize a reply.
Expand Down Expand Up @@ -216,7 +218,7 @@ def __init__(
int_speech_duration=interrupt_speech_duration,
int_min_words=interrupt_min_words,
min_endpointing_delay=min_endpointing_delay,
max_recursive_fnc_calls=max_recursive_fnc_calls,
max_nested_fnc_calls=max_nested_fnc_calls,
preemptive_synthesis=preemptive_synthesis,
transcription=transcription,
before_llm_cb=before_llm_cb,
Expand Down Expand Up @@ -734,7 +736,7 @@ def _commit_user_question_if_needed() -> None:

new_function_calls = llm_stream.function_calls

for i in range(self._opts.max_recursive_fnc_calls):
for i in range(self._opts.max_nested_fnc_calls):
self.emit("function_calls_collected", new_function_calls)

called_fncs = []
Expand Down Expand Up @@ -788,7 +790,7 @@ def _commit_user_question_if_needed() -> None:
answer_llm_stream = self._llm.chat(
chat_ctx=chat_ctx,
fnc_ctx=self.fnc_ctx
if i < self._opts.max_recursive_fnc_calls - 1
if i < self._opts.max_nested_fnc_calls - 1
else None,
)
answer_synthesis = self._synthesize_agent_speech(
Expand Down
4 changes: 2 additions & 2 deletions livekit-agents/livekit/agents/tokenize/tokenizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ class TokenData:

class SentenceTokenizer(ABC):
@abstractmethod
def tokenize(self, *, text: str, language: str | None = None) -> list[str]:
def tokenize(self, text: str, *, language: str | None = None) -> list[str]:
pass

@abstractmethod
Expand Down Expand Up @@ -62,7 +62,7 @@ def _check_not_closed(self) -> None:

class WordTokenizer(ABC):
@abstractmethod
def tokenize(self, *, text: str, language: str | None = None) -> list[str]:
def tokenize(self, text: str, *, language: str | None = None) -> list[str]:
pass

@abstractmethod
Expand Down
Loading

0 comments on commit 334ffd0

Please sign in to comment.