Commit

WIP: fix tests
NickyHavoc committed Feb 21, 2024
1 parent 5da9a12 commit df55c13
Showing 25 changed files with 28 additions and 93 deletions.
6 changes: 0 additions & 6 deletions run.py
@@ -1,13 +1,7 @@
"""Fastapi server to run predictions."""
import os

from dotenv import load_dotenv
from fastapi import Depends, FastAPI

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
LimitedConcurrencyClient,
)
from intelligence_layer.core.model import AlephAlphaModel, LuminousControlModel
from intelligence_layer.core.tracer import NoOpTracer
from intelligence_layer.use_cases.classify.classify import (
3 changes: 0 additions & 3 deletions src/intelligence_layer/core/chunk.py
@@ -3,9 +3,6 @@
from pydantic import BaseModel
from semantic_text_splitter import HuggingFaceTextSplitter

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core.model import AlephAlphaModel
from intelligence_layer.core.task import Task
from intelligence_layer.core.tracer import TaskSpan
8 changes: 2 additions & 6 deletions src/intelligence_layer/core/echo.py
@@ -1,13 +1,9 @@
from functools import lru_cache
from typing import NewType, Sequence

from aleph_alpha_client import CompletionRequest, Prompt, Tokens
from aleph_alpha_client import Prompt, Tokens
from pydantic import BaseModel
from tokenizers import Encoding, Tokenizer # type: ignore
from tokenizers import Encoding # type: ignore

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core.model import AlephAlphaModel, CompleteInput
from intelligence_layer.core.prompt_template import PromptTemplate
from intelligence_layer.core.task import Task, Token
3 changes: 0 additions & 3 deletions src/intelligence_layer/core/explain.py
@@ -1,9 +1,6 @@
from aleph_alpha_client import ExplanationRequest, ExplanationResponse
from pydantic import BaseModel

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core.model import AlephAlphaModel
from intelligence_layer.core.task import Task
from intelligence_layer.core.tracer import TaskSpan
12 changes: 6 additions & 6 deletions src/intelligence_layer/core/instruct.py
@@ -1,7 +1,8 @@
from typing import Optional

from pydantic import BaseModel
from intelligence_layer.core.model import AlephAlphaModel, CompleteInput, CompleteOutput

from intelligence_layer.core.model import AlephAlphaModel, CompleteInput, CompleteOutput
from intelligence_layer.core.task import Task
from intelligence_layer.core.tracer import TaskSpan

@@ -22,9 +23,8 @@ def do_run(self, input: InstructInput, task_span: TaskSpan) -> CompleteOutput:
prompt = self._model.to_instruct_prompt(
instruction=input.instruction,
input=input.input,
response_prefix=input.response_prefix
response_prefix=input.response_prefix,
)
return self._model.complete(
CompleteInput(prompt=prompt, maximum_tokens=input.maximum_tokens), task_span
)
return self._model.complete(CompleteInput(
prompt=prompt,
maximum_tokens=input.maximum_tokens
), task_span)
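
Taken together, these hunks show the pattern the commit moves toward: a task holds an `AlephAlphaModel` and builds both the prompt and the completion through it. A minimal usage sketch of that pattern — `to_instruct_prompt`, `CompleteInput`, and `complete` appear in the diff above, while the constructor argument, passing a `NoOpTracer`, and the `completion` attribute are assumptions:

```python
# Sketch of the model-centric call pattern visible in instruct.py.
from intelligence_layer.core.model import CompleteInput, LuminousControlModel
from intelligence_layer.core.tracer import NoOpTracer

model = LuminousControlModel("luminous-base-control")  # assumed constructor argument
prompt = model.to_instruct_prompt(
    instruction="Summarize the text in one sentence.",
    input="The quick brown fox jumps over the lazy dog.",
    response_prefix=None,
)
output = model.complete(
    CompleteInput(prompt=prompt, maximum_tokens=64),
    NoOpTracer(),  # assumed: a tracer in place of the task_span used above
)
print(output.completion)  # assumed attribute on CompleteOutput
```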
4 changes: 0 additions & 4 deletions src/intelligence_layer/core/text_highlight.py
@@ -11,10 +11,6 @@
from aleph_alpha_client.explanation import TextScoreWithRaw
from pydantic import BaseModel

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
LimitedConcurrencyClient,
)
from intelligence_layer.core.explain import Explain, ExplainInput
from intelligence_layer.core.model import AlephAlphaModel
from intelligence_layer.core.prompt_template import (
@@ -10,7 +10,7 @@
LuminousControlModel,
)
from intelligence_layer.core.task import Task
from intelligence_layer.core.tracer import NoOpTracer, TaskSpan
from intelligence_layer.core.tracer import TaskSpan

INSTRUCT_CONFIGS = {
Language(
@@ -2,12 +2,9 @@
import re
from typing import Iterable, Mapping, Optional, Sequence

from aleph_alpha_client import PromptTemplate, Tokens
from aleph_alpha_client import Tokens
from pydantic import BaseModel

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core.echo import EchoInput, EchoTask, TokenWithLogProb
from intelligence_layer.core.model import AlephAlphaModel, LuminousControlModel
from intelligence_layer.core.prompt_template import RichPrompt
9 changes: 0 additions & 9 deletions src/intelligence_layer/use_cases/intelligence_starter_app.py
@@ -1,15 +1,6 @@
import os

from aleph_alpha_client import Client
from dotenv import load_dotenv
from fastapi import FastAPI

from intelligence_layer.connectors import (
AlephAlphaClientProtocol,
LimitedConcurrencyClient,
)
from intelligence_layer.core import IntelligenceApp
from intelligence_layer.core.model import LuminousControlModel
from intelligence_layer.use_cases.classify.classify import ClassifyInput
from intelligence_layer.use_cases.classify.prompt_based_classify import (
PromptBasedClassify,
3 changes: 0 additions & 3 deletions src/intelligence_layer/use_cases/qa/long_context_qa.py
@@ -1,8 +1,5 @@
from pydantic import BaseModel

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.connectors.retrievers.base_retriever import Document
from intelligence_layer.connectors.retrievers.qdrant_in_memory_retriever import (
QdrantInMemoryRetriever,
9 changes: 2 additions & 7 deletions src/intelligence_layer/use_cases/qa/multiple_chunk_qa.py
@@ -2,9 +2,6 @@

from pydantic import BaseModel

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core.chunk import Chunk
from intelligence_layer.core.detect_language import Language, language_config
from intelligence_layer.core.model import (
@@ -124,10 +121,8 @@ class MultipleChunkQa(Task[MultipleChunkQaInput, MultipleChunkQaOutput]):
Args:
client: Aleph Alpha client instance for running model related API calls.
model: A valid Aleph Alpha model name.
Attributes:
MERGE_ANSWERS_INSTRUCTION: The instruction template used for combining multiple answers into one.
model: A valid Aleph Alpha model.
merge_answers_instruct_configs: Mapping language used to prompt parameters.
Example:
>>> import os
2 changes: 1 addition & 1 deletion src/intelligence_layer/use_cases/qa/single_chunk_qa.py
@@ -185,4 +185,4 @@ def _get_highlights(
return [h.text for h in highlight_output.highlights if h.score > 0]

def _no_answer_to_none(self, completion: str, no_answer_str: str) -> Optional[str]:
return completion if no_answer_str in completion else None
return completion if no_answer_str not in completion else None
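
The one-line fix above inverts the sentinel check: a completion counts as a real answer only when the no-answer marker is absent from it. A standalone sketch of the corrected logic (function name and sample strings are illustrative, not the library's API):

```python
from typing import Optional

def no_answer_to_none(completion: str, no_answer_str: str) -> Optional[str]:
    # Keep the completion only when the "no answer" sentinel is absent.
    return completion if no_answer_str not in completion else None

assert no_answer_to_none("Paris is the capital.", "no answer in text") == "Paris is the capital."
assert no_answer_to_none("There is no answer in text.", "no answer in text") is None
```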
@@ -1,6 +1,5 @@
from typing import Mapping

from intelligence_layer.connectors import AlephAlphaClientProtocol
from intelligence_layer.core import ChunkInput, ChunkTask, Task, TaskSpan
from intelligence_layer.core.chunk import ChunkOutput, ChunkOverlapTask
from intelligence_layer.core.detect_language import Language
@@ -1,14 +1,12 @@
from typing import Mapping

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core import Language, Task, TaskSpan
from intelligence_layer.core.model import (
from intelligence_layer.core import (
AlephAlphaModel,
CompleteInput,
CompleteOutput,
Language,
LuminousControlModel,
Task,
TaskSpan,
)
from intelligence_layer.use_cases.summarize.summarize import (
SingleChunkSummarizeInput,
2 changes: 1 addition & 1 deletion tests/core/test_model.py
@@ -1,4 +1,4 @@
from aleph_alpha_client import CompletionRequest, Text
from aleph_alpha_client import Text
from pytest import fixture

from intelligence_layer.connectors.limited_concurrency_client import (
10 changes: 7 additions & 3 deletions tests/core/test_text_highlight.py
@@ -1,14 +1,19 @@
from aleph_alpha_client import Image
from pytest import fixture, raises

from intelligence_layer.core.model import AlephAlphaModel, LuminousControlModel
from intelligence_layer.core.model import AlephAlphaModel
from intelligence_layer.core.prompt_template import PromptTemplate, RichPrompt
from intelligence_layer.core.text_highlight import TextHighlight, TextHighlightInput
from intelligence_layer.core.tracer import NoOpTracer


class AlephAlphaVanillaModel(AlephAlphaModel):
def to_instruct_prompt(self, instruction: str, input: str | None = None, response_prefix: str | None = None) -> RichPrompt:
def to_instruct_prompt(
self,
instruction: str,
input: str | None = None,
response_prefix: str | None = None,
) -> RichPrompt:
raise NotImplementedError()


@@ -31,7 +36,6 @@ def test_text_highlight(text_highlight: TextHighlight) -> None:
The international community is abuzz with plans for more focused research and potential interstellar missions.{% endpromptrange %}
Answer:"""
prompt_with_metadata = PromptTemplate(prompt_template_str).to_rich_prompt()
model = "luminous-base-control"

input = TextHighlightInput(
rich_prompt=prompt_with_metadata,
7 changes: 2 additions & 5 deletions tests/core/test_tracer.py
@@ -1,13 +1,9 @@
from pathlib import Path

from aleph_alpha_client import Prompt
from aleph_alpha_client.completion import CompletionRequest
from opentelemetry.trace import get_tracer
from pytest import fixture, mark

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core import (
CompleteInput,
CompositeTracer,
@@ -24,6 +20,7 @@
from intelligence_layer.core.model import LuminousControlModel, _Complete


@fixture
def complete(luminous_control_model: LuminousControlModel) -> _Complete:
return luminous_control_model._complete

@@ -95,7 +92,7 @@ def test_task_automatically_logs_input_and_output(complete: _Complete) -> None:
assert len(tracer.entries) == 1
task_span = tracer.entries[0]
assert isinstance(task_span, InMemoryTaskSpan)
assert task_span.name == "Complete"
assert task_span.name == type(complete).__name__
assert task_span.input == input
assert task_span.output == output
assert task_span.start_timestamp and task_span.end_timestamp
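
The rewritten assertion derives the expected span name from the task's class instead of hard-coding "Complete", so the test keeps passing if the class is renamed. A tiny illustration of the mechanism (the class here is a stand-in, not the library's task):

```python
# type(obj).__name__ follows the concrete class at runtime,
# so renaming the task class cannot silently break the assertion.
class _Complete:
    pass

task = _Complete()
assert type(task).__name__ == "_Complete"
```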
@@ -17,7 +17,6 @@
)
from intelligence_layer.core.instruct import InstructInput
from intelligence_layer.core.model import CompleteOutput
from intelligence_layer.core.prompt_template import RichPrompt
from intelligence_layer.core.tracer import utc_now
from intelligence_layer.evaluation import (
ArgillaEvaluationRepository,
3 changes: 0 additions & 3 deletions tests/use_cases/classify/test_prompt_based_classify.py
@@ -2,9 +2,6 @@

from pytest import fixture

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core import Chunk, InMemoryTracer, NoOpTracer
from intelligence_layer.core.model import LuminousControlModel
from intelligence_layer.evaluation import (
3 changes: 0 additions & 3 deletions tests/use_cases/qa/test_long_context_qa.py
@@ -1,8 +1,5 @@
from pytest import fixture

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core.model import LuminousControlModel
from intelligence_layer.core.tracer import NoOpTracer
from intelligence_layer.use_cases.qa.long_context_qa import (
3 changes: 0 additions & 3 deletions tests/use_cases/qa/test_multiple_chunk_qa.py
@@ -2,9 +2,6 @@

from pytest import fixture

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core.chunk import Chunk
from intelligence_layer.core.detect_language import Language
from intelligence_layer.core.tracer import NoOpTracer
3 changes: 0 additions & 3 deletions tests/use_cases/summarize/conftest.py
@@ -1,8 +1,5 @@
from pytest import fixture

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core.chunk import Chunk
from intelligence_layer.core.model import LuminousControlModel
from intelligence_layer.use_cases.summarize.steerable_long_context_summarize import (
5 changes: 1 addition & 4 deletions tests/use_cases/summarize/test_recursive_summarize.py
@@ -4,9 +4,6 @@
from aleph_alpha_client import Client, CompletionRequest, CompletionResponse
from pytest import fixture

from intelligence_layer.connectors.limited_concurrency_client import (
AlephAlphaClientProtocol,
)
from intelligence_layer.core import NoOpTracer
from intelligence_layer.core.model import LuminousControlModel
from intelligence_layer.use_cases import LongContextSummarizeInput, RecursiveSummarize
@@ -63,7 +60,7 @@ def test_recursive_summarize_stops_when_num_partial_summaries_stays_same(
task = RecursiveSummarize(steerable_long_context_summarize)
output = task.run(input, NoOpTracer())

assert output.generated_tokens > 145
assert output.generated_tokens > 50


def test_recursive_summarize_stops_after_one_chunk(
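
The loosened token threshold aside, the test names spell out the stopping rule: recursion ends once another pass no longer reduces the number of partial summaries, or immediately when there is only one chunk. A hypothetical sketch of that rule, not the library's implementation (both helper callables are assumptions):

```python
from typing import Callable, Sequence

def recursive_summarize(
    chunks: Sequence[str],
    summarize_chunk: Callable[[str], str],              # assumed helper
    regroup: Callable[[Sequence[str]], Sequence[str]],  # assumed helper
) -> str:
    partials = [summarize_chunk(c) for c in chunks]
    # A single chunk returns right away, as in
    # test_recursive_summarize_stops_after_one_chunk.
    while len(partials) > 1:
        next_round = [summarize_chunk(c) for c in regroup(partials)]
        # Stop when a pass no longer shrinks the number of partial summaries,
        # mirroring test_recursive_summarize_stops_when_num_partial_summaries_stays_same.
        if len(next_round) >= len(partials):
            break
        partials = next_round
    return " ".join(partials)
```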
@@ -1,6 +1,3 @@
from pytest import fixture, mark

from intelligence_layer.connectors import AlephAlphaClientProtocol
from intelligence_layer.core import Language, NoOpTracer
from intelligence_layer.core.model import LuminousControlModel
from intelligence_layer.use_cases import (
4 changes: 0 additions & 4 deletions tests/use_cases/test_intelligence_starter_app.py
@@ -1,7 +1,3 @@
import os

from aleph_alpha_client import Client
from dotenv import load_dotenv
from fastapi import FastAPI, testclient
from pytest import fixture
