From 9c66971514bd7425a53183b20a71d2a7b43cdb1a Mon Sep 17 00:00:00 2001
From: pitneitemeier
Date: Tue, 20 Feb 2024 16:43:26 +0100
Subject: [PATCH] removed PromptWithMetadata in favor of RichPrompt which
 extends Prompt

---
 src/examples/classification.ipynb             |  2 +-
 src/intelligence_layer/core/__init__.py       |  2 +-
 src/intelligence_layer/core/complete.py       | 26 ++++------
 src/intelligence_layer/core/echo.py           |  2 +-
 .../core/prompt_template.py                   | 17 ++-----
 src/intelligence_layer/core/text_highlight.py | 16 +++---
 .../use_cases/qa/single_chunk_qa.py           |  8 +--
 tests/core/test_complete.py                   |  2 +-
 tests/core/test_prompt_template.py            | 50 +++++++++----------
 tests/core/test_text_highlight.py             | 28 +++++------
 ...t_instruct_comparison_argilla_evaluator.py |  6 +--
 11 files changed, 68 insertions(+), 91 deletions(-)

diff --git a/src/examples/classification.ipynb b/src/examples/classification.ipynb
index db9b0c178..0883b9a3d 100644
--- a/src/examples/classification.ipynb
+++ b/src/examples/classification.ipynb
@@ -443,7 +443,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.12"
+   "version": "3.11.4"
   }
  },
  "nbformat": 4,
diff --git a/src/intelligence_layer/core/__init__.py b/src/intelligence_layer/core/__init__.py
index 64484b59d..5626fa9bf 100644
--- a/src/intelligence_layer/core/__init__.py
+++ b/src/intelligence_layer/core/__init__.py
@@ -33,7 +33,7 @@
 from .prompt_template import PromptItemCursor as PromptItemCursor
 from .prompt_template import PromptRange as PromptRange
 from .prompt_template import PromptTemplate as PromptTemplate
-from .prompt_template import PromptWithMetadata as PromptWithMetadata
+from .prompt_template import RichPrompt as RichPrompt
 from .prompt_template import TextCursor as TextCursor
 from .task import Input as Input
 from .task import Output as Output
diff --git a/src/intelligence_layer/core/complete.py b/src/intelligence_layer/core/complete.py
index 85835b308..96e232361 100644
--- a/src/intelligence_layer/core/complete.py
+++ b/src/intelligence_layer/core/complete.py
@@ -6,7 +6,7 @@
 from intelligence_layer.connectors.limited_concurrency_client import (
     AlephAlphaClientProtocol,
 )
-from intelligence_layer.core.prompt_template import PromptTemplate, PromptWithMetadata
+from intelligence_layer.core.prompt_template import PromptTemplate, RichPrompt
 from intelligence_layer.core.task import Task
 from intelligence_layer.core.tracer import TaskSpan

@@ -90,14 +90,14 @@ class PromptOutput(CompleteOutput):

     Attributes:
         response: The generated response to the instruction.
-        prompt_with_metadata: To handle the instruction, a `PromptTemplate` is used.
+        rich_prompt: To handle the instruction, a `PromptTemplate` is used.
             The template defines two `PromptRange` instances:
             - "instruction": covering the instruction text.
             - "input": covering the input text.
             These can for example be used for downstream `TextHighlight` tasks.
     """

-    prompt_with_metadata: PromptWithMetadata
+    rich_prompt: RichPrompt


 class Instruct(Task[InstructInput, PromptOutput]):
@@ -147,22 +147,18 @@ def __init__(self, client: AlephAlphaClientProtocol, model: str) -> None:
         self._model = model

     def do_run(self, input: InstructInput, task_span: TaskSpan) -> PromptOutput:
-        prompt_with_metadata = PromptTemplate(
-            self.INSTRUCTION_PROMPT_TEMPLATE
-        ).to_prompt_with_metadata(
+        prompt = PromptTemplate(self.INSTRUCTION_PROMPT_TEMPLATE).to_rich_prompt(
             input=input.input,
             instruction=input.instruction,
             response_prefix=input.response_prefix,
         )
         response = self._complete(
-            prompt_with_metadata.prompt,
+            prompt,
             input.maximum_response_tokens,
             self._model,
             task_span,
         )
-        return PromptOutput(
-            response=response, prompt_with_metadata=prompt_with_metadata
-        )
+        return PromptOutput(response=response, rich_prompt=prompt)

     def _complete(
         self, prompt: Prompt, maximum_tokens: int, model: str, task_span: TaskSpan
@@ -274,9 +270,7 @@ def __init__(self, client: AlephAlphaClientProtocol, model: str) -> None:
         self._model = model

     def do_run(self, input: FewShotInput, task_span: TaskSpan) -> PromptOutput:
-        prompt_with_metadata = PromptTemplate(
-            self.FEW_SHOT_PROMPT_TEMPLATE
-        ).to_prompt_with_metadata(
+        prompt = PromptTemplate(self.FEW_SHOT_PROMPT_TEMPLATE).to_rich_prompt(
             instruction=input.few_shot_config.instruction,
             input=input.input,
             few_shot_examples=[
@@ -286,15 +280,13 @@ def do_run(self, input: FewShotInput, task_span: TaskSpan) -> PromptOutput:
             response_prefix=input.few_shot_config.response_prefix,
         )
         response = self._complete(
-            prompt_with_metadata.prompt,
+            prompt,
             input.maximum_response_tokens,
             input.few_shot_config.additional_stop_sequences,
             self._model,
             task_span,
         )
-        return PromptOutput(
-            response=response, prompt_with_metadata=prompt_with_metadata
-        )
+        return PromptOutput(response=response, rich_prompt=prompt)

     def _complete(
         self,
diff --git a/src/intelligence_layer/core/echo.py b/src/intelligence_layer/core/echo.py
index b9f5c372a..1035cc634 100644
--- a/src/intelligence_layer/core/echo.py
+++ b/src/intelligence_layer/core/echo.py
@@ -88,7 +88,7 @@ def do_run(self, input: EchoInput, task_span: TaskSpan) -> EchoOutput:
             input.expected_completion, input.model
         )
         prompt_template = PromptTemplate(self.PROMPT_TEMPLATE_STR)
-        prompt = prompt_template.to_prompt(
+        prompt = prompt_template.to_rich_prompt(
             prompt=prompt_template.embed_prompt(input.prompt),
             expected_completion=prompt_template.placeholder(
                 Tokens.from_token_ids(
diff --git a/src/intelligence_layer/core/prompt_template.py b/src/intelligence_layer/core/prompt_template.py
index 53dab69d8..6d2970871 100644
--- a/src/intelligence_layer/core/prompt_template.py
+++ b/src/intelligence_layer/core/prompt_template.py
@@ -76,16 +76,14 @@ class PromptRange:
     end: Cursor


-@dataclass(frozen=True)
-class PromptWithMetadata:
+@dataclass
+class RichPrompt(Prompt):
     """The `Prompt` along with some metadata generated when a `PromptTemplate` is turned into a `Prompt`.

     Args:
-        prompt: The actual `Prompt`.
         ranges: A mapping of range name to a `Sequence` of corresponding `PromptRange` instances.
     """

-    prompt: Prompt
     ranges: Mapping[str, Sequence[PromptRange]]


@@ -278,7 +276,7 @@ def embed_prompt(self, prompt: Prompt) -> str:
                 last_item = item
         return prompt_text

-    def to_prompt_with_metadata(self, **kwargs: Any) -> PromptWithMetadata:
+    def to_rich_prompt(self, **kwargs: Any) -> RichPrompt:
         """Creates a `Prompt` along with metadata from the template string and the given parameters.

         Currently the only metadata returned is information about ranges that are marked in the template.
@@ -303,17 +301,10 @@ def to_prompt_with_metadata(self, **kwargs: Any) -> PromptWithMetadata:
             placeholder_indices, context.placeholder_range_names(), liquid_prompt
         )
-        result = PromptWithMetadata(Prompt(modalities), placeholder_ranges)
+        result = RichPrompt(modalities, placeholder_ranges)
         self._reset_placeholder_state()
         return result

-    def to_prompt(self, **kwargs: Any) -> Prompt:
-        """Creates a `Prompt` from the template string and the given parameters.
-
-        Provided parameters are passed to `liquid.Template.render`.
-        """
-        return self.to_prompt_with_metadata(**kwargs).prompt
-
     def _reset_placeholder_state(self) -> None:
         self._prompt_item_placeholders = {}
diff --git a/src/intelligence_layer/core/text_highlight.py b/src/intelligence_layer/core/text_highlight.py
index 435da7237..3929ea3f4 100644
--- a/src/intelligence_layer/core/text_highlight.py
+++ b/src/intelligence_layer/core/text_highlight.py
@@ -18,7 +18,7 @@
 from intelligence_layer.core.prompt_template import (
     Cursor,
     PromptRange,
-    PromptWithMetadata,
+    RichPrompt,
     TextCursor,
 )
 from intelligence_layer.core.task import Task
@@ -38,7 +38,7 @@ class TextHighlightInput(BaseModel):
         If this set is empty highlights of the entire prompt are returned.
     """

-    prompt_with_metadata: PromptWithMetadata
+    rich_prompt: RichPrompt
     target: str
     model: str
     focus_ranges: frozenset[str] = frozenset()
@@ -70,7 +70,7 @@ class TextHighlight(Task[TextHighlightInput, TextHighlightOutput]):
     """Generates text highlights given a prompt and completion.

     For a given prompt and target (completion), extracts the parts of the prompt responsible for generation.
-    A range can be provided in the input 'PromptWithMetadata' via use of the liquid language (see the example).
+    A range can be provided via use of the liquid language (see the example).
     In this case, the highlights will only refer to text within this range.

     Args:
@@ -117,19 +117,19 @@ def do_run(
     ) -> TextHighlightOutput:
         self._raise_on_invalid_focus_range(input)
         explanation = self._explain(
-            prompt=input.prompt_with_metadata.prompt,
+            prompt=input.rich_prompt,
             target=input.target,
             model=input.model,
             task_span=task_span,
         )
         prompt_ranges = self._flatten_prompt_ranges(
             range
-            for name, range in input.prompt_with_metadata.ranges.items()
+            for name, range in input.rich_prompt.ranges.items()
             if name in input.focus_ranges
         )
         text_prompt_item_explanations_and_indices = (
             self._extract_text_prompt_item_explanations_and_item_index(
-                input.prompt_with_metadata.prompt, explanation
+                input.rich_prompt, explanation
             )
         )
         highlights = self._to_highlights(
@@ -140,9 +140,7 @@ def do_run(
         return TextHighlightOutput(highlights=highlights)

     def _raise_on_invalid_focus_range(self, input: TextHighlightInput) -> None:
-        unknown_focus_ranges = input.focus_ranges - set(
-            input.prompt_with_metadata.ranges.keys()
-        )
+        unknown_focus_ranges = input.focus_ranges - set(input.rich_prompt.ranges.keys())
         if unknown_focus_ranges:
             raise ValueError(f"Unknown focus ranges: {', '.join(unknown_focus_ranges)}")

diff --git a/src/intelligence_layer/use_cases/qa/single_chunk_qa.py b/src/intelligence_layer/use_cases/qa/single_chunk_qa.py
index c4982750d..55e9142c3 100644
--- a/src/intelligence_layer/use_cases/qa/single_chunk_qa.py
+++ b/src/intelligence_layer/use_cases/qa/single_chunk_qa.py
@@ -9,7 +9,7 @@
 from intelligence_layer.core.chunk import Chunk
 from intelligence_layer.core.complete import Instruct, InstructInput, PromptOutput
 from intelligence_layer.core.detect_language import Language, language_config
-from intelligence_layer.core.prompt_template import PromptWithMetadata
+from intelligence_layer.core.prompt_template import RichPrompt
 from intelligence_layer.core.task import Task
 from intelligence_layer.core.text_highlight import TextHighlight, TextHighlightInput
 from intelligence_layer.core.tracer import TaskSpan
@@ -132,7 +132,7 @@ def do_run(
         answer = self._no_answer_to_none(output.completion.strip())
         highlights = (
             self._get_highlights(
-                output.prompt_with_metadata,
+                output.rich_prompt,
                 output.completion,
                 task_span,
             )
@@ -158,12 +158,12 @@ def _generate_answer(

     def _get_highlights(
         self,
-        prompt_with_metadata: PromptWithMetadata,
+        prompt_with_metadata: RichPrompt,
         completion: str,
         task_span: TaskSpan,
     ) -> Sequence[str]:
         highlight_input = TextHighlightInput(
-            prompt_with_metadata=prompt_with_metadata,
+            rich_prompt=prompt_with_metadata,
             target=completion,
             model=self._model,
             focus_ranges=frozenset({"input"}),
diff --git a/tests/core/test_complete.py b/tests/core/test_complete.py
index cafaa3b1b..cf9e10249 100644
--- a/tests/core/test_complete.py
+++ b/tests/core/test_complete.py
@@ -33,7 +33,7 @@ def test_instruct_without_input(instruct: Instruct, no_op_tracer: NoOpTracer) ->
     output = instruct.run(input, no_op_tracer)

     assert "Berlin" in output.completion
-    prompt_text_item = output.prompt_with_metadata.prompt.items[0]
+    prompt_text_item = output.rich_prompt.items[0]
     assert isinstance(prompt_text_item, Text)
     assert "Input" not in prompt_text_item.text
diff --git a/tests/core/test_prompt_template.py b/tests/core/test_prompt_template.py
index 2a2ed9ab7..74db3f3d3 100644
--- a/tests/core/test_prompt_template.py
+++ b/tests/core/test_prompt_template.py
@@ -24,7 +24,7 @@ def test_to_prompt_with_text_array() -> None:
     )
     names = ["World", "Rutger"]

-    prompt = template.to_prompt(names=names)
+    prompt = template.to_rich_prompt(names=names)

     expected = "".join([f"Hello {name}!\n" for name in names])
     assert prompt == Prompt.from_text(expected)
@@ -40,7 +40,7 @@ def test_to_prompt_with_invalid_input() -> None:
     )

     with raises(LiquidTypeError):
-        template.to_prompt(names=7)
+        template.to_rich_prompt(names=7)


 def test_to_prompt_with_single_image(prompt_image: Image) -> None:
@@ -51,7 +51,7 @@ def test_to_prompt_with_single_image(prompt_image: Image) -> None:
 """
     )

-    prompt = template.to_prompt(whatever=template.placeholder(prompt_image))
+    prompt = template.to_rich_prompt(whatever=template.placeholder(prompt_image))

     expected = Prompt(
         [
@@ -72,7 +72,7 @@ def test_to_prompt_with_image_sequence(prompt_image: Image) -> None:
 """
     )

-    prompt = template.to_prompt(
+    prompt = template.to_rich_prompt(
         images=[template.placeholder(prompt_image), template.placeholder(prompt_image)]
     )

@@ -83,7 +83,7 @@ def test_to_prompt_with_mixed_modality_variables(prompt_image: Image) -> None:
     template = PromptTemplate("""{{image}}{{name}}{{image}}""")

-    prompt = template.to_prompt(
+    prompt = template.to_rich_prompt(
         image=template.placeholder(prompt_image), name="whatever"
     )

@@ -94,7 +94,7 @@ def test_to_prompt_with_unused_image(prompt_image: Image) -> None:
     template = PromptTemplate("cool")

-    prompt = template.to_prompt(images=template.placeholder(prompt_image))
+    prompt = template.to_rich_prompt(images=template.placeholder(prompt_image))

     assert prompt == Prompt.from_text("cool")

@@ -105,7 +105,7 @@ def test_to_prompt_with_multiple_different_images(prompt_image: Image) -> None:

     template = PromptTemplate("""{{image_1}}{{image_2}}""")

-    prompt = template.to_prompt(
+    prompt = template.to_rich_prompt(
         image_1=template.placeholder(prompt_image),
         image_2=template.placeholder(second_image),
     )
@@ -118,7 +118,7 @@ def test_to_prompt_with_embedded_prompt(prompt_image: Image) -> None:

     template = PromptTemplate("""{{user_prompt}}""")

-    prompt = template.to_prompt(user_prompt=template.embed_prompt(user_prompt))
+    prompt = template.to_rich_prompt(user_prompt=template.embed_prompt(user_prompt))

     assert prompt == user_prompt

@@ -128,7 +128,7 @@ def test_to_prompt_does_not_add_whitespace_after_image(prompt_image: Image) -> N

     template = PromptTemplate("{{user_prompt}}")

-    prompt = template.to_prompt(user_prompt=template.embed_prompt(user_prompt))
+    prompt = template.to_rich_prompt(user_prompt=template.embed_prompt(user_prompt))

     assert prompt == user_prompt

@@ -140,7 +140,7 @@ def test_to_prompt_skips_empty_strings() -> None:

     template = PromptTemplate("{{user_prompt}}")

-    prompt = template.to_prompt(user_prompt=template.embed_prompt(user_prompt))
+    prompt = template.to_rich_prompt(user_prompt=template.embed_prompt(user_prompt))

     assert prompt == Prompt([Text.from_text("Cool Also cool")])

@@ -152,7 +152,7 @@ def test_to_prompt_adds_whitespaces() -> None:

     template = PromptTemplate("{{user_prompt}}")

-    prompt = template.to_prompt(user_prompt=template.embed_prompt(user_prompt))
+    prompt = template.to_rich_prompt(user_prompt=template.embed_prompt(user_prompt))

     assert prompt == Prompt([Text.from_text("start middle end")])

@@ -168,21 +168,21 @@ def test_to_prompt_works_with_tokens() -> None:

     template = PromptTemplate("{{user_prompt}}")

-    prompt = template.to_prompt(user_prompt=template.embed_prompt(user_prompt))
+    prompt = template.to_rich_prompt(user_prompt=template.embed_prompt(user_prompt))

     assert prompt == user_prompt


 def test_to_prompt_with_empty_template() -> None:
-    assert PromptTemplate("").to_prompt() == Prompt([])
+    assert PromptTemplate("").to_rich_prompt() == Prompt([])


 def test_to_prompt_resets_template(prompt_image: Image) -> None:
     template = PromptTemplate("{{image}}")
     placeholder = template.placeholder(prompt_image)
-    prompt = template.to_prompt(image=placeholder)
+    prompt = template.to_rich_prompt(image=placeholder)

-    prompt_with_reset_template = template.to_prompt(image=placeholder)
+    prompt_with_reset_template = template.to_rich_prompt(image=placeholder)

     assert prompt_with_reset_template != prompt

@@ -201,7 +201,7 @@ def test_to_prompt_data_returns_ranges(prompt_image: Image) -> None:
         "{{prefix_items}}{{prefix_text}}{% promptrange r1 %}{{embedded_text}}{{embedded_items}}{% endpromptrange %}",
     )

-    prompt_data = template.to_prompt_with_metadata(
+    prompt_data = template.to_rich_prompt(
         prefix_items=template.embed_prompt(Prompt(prefix_items + [prefix_merged])),
         prefix_text=prefix_text,
         embedded_text=embedded_text,
@@ -237,9 +237,7 @@ def test_to_prompt_data_returns_ranges_for_image_only_prompt(
         ).lstrip()
     )

-    prompt_data = template.to_prompt_with_metadata(
-        image=template.placeholder(prompt_image)
-    )
+    prompt_data = template.to_rich_prompt(image=template.placeholder(prompt_image))

     r1 = prompt_data.ranges.get("r1")
     assert r1 == [PromptRange(start=PromptItemCursor(0), end=PromptItemCursor(0))]
@@ -249,7 +247,7 @@
 def test_to_prompt_data_returns_no_range_with_empty_template() -> None:
     template = PromptTemplate("{% promptrange r1 %}{% endpromptrange %}")

-    assert template.to_prompt_with_metadata().ranges.get("r1") == []
+    assert template.to_rich_prompt().ranges.get("r1") == []


 def test_to_prompt_data_returns_no_empty_ranges(prompt_image: Image) -> None:
@@ -258,9 +256,9 @@ def test_to_prompt_data_returns_no_empty_ranges(prompt_image: Image) -> None:
     )

     assert (
-        template.to_prompt_with_metadata(
-            image=template.placeholder(prompt_image)
-        ).ranges.get("r1")
+        template.to_rich_prompt(image=template.placeholder(prompt_image)).ranges.get(
+            "r1"
+        )
         == []
     )

@@ -278,7 +276,7 @@ def test_to_prompt_data_returns_multiple_text_ranges_in_for_loop() -> None:
         "{% for i in (1..4) %}{% promptrange r1 %}{{embedded}}{% endpromptrange %}{% endfor %}"
     )

-    prompt_data = template.to_prompt_with_metadata(embedded=embedded)
+    prompt_data = template.to_rich_prompt(embedded=embedded)

     assert prompt_data.ranges.get("r1") == [
         PromptRange(
@@ -296,9 +294,7 @@ def test_to_prompt_data_returns_multiple_imgae_ranges_in_for_loop(
         "{% for i in (1..4) %}{% promptrange r1 %}{{embedded}}{% endpromptrange %}{% endfor %}"
     )

-    prompt_data = template.to_prompt_with_metadata(
-        embedded=template.placeholder(prompt_image)
-    )
+    prompt_data = template.to_rich_prompt(embedded=template.placeholder(prompt_image))

     assert prompt_data.ranges.get("r1") == [
         PromptRange(
diff --git a/tests/core/test_text_highlight.py b/tests/core/test_text_highlight.py
index 08163e5ca..1efd162aa 100644
--- a/tests/core/test_text_highlight.py
+++ b/tests/core/test_text_highlight.py
@@ -22,11 +22,11 @@ def test_text_highlight(text_highlight: TextHighlight) -> None:
 This finding, while not complex extraterrestrial life, significantly raises the prospects of life's commonality in the universe.
 The international community is abuzz with plans for more focused research and potential interstellar missions.{% endpromptrange %}
 Answer:"""
-    prompt_with_metadata = PromptTemplate(prompt_template_str).to_prompt_with_metadata()
+    prompt_with_metadata = PromptTemplate(prompt_template_str).to_rich_prompt()
     model = "luminous-base-control"

     input = TextHighlightInput(
-        prompt_with_metadata=prompt_with_metadata,
+        rich_prompt=prompt_with_metadata,
         target=answer,
         model=model,
         focus_ranges=frozenset({"r1"}),
@@ -51,12 +51,12 @@ def test_text_highlight_with_range_without_highlight(
 This finding, while not complex extraterrestrial life, significantly raises the prospects of life's commonality in the universe.
 The international community is abuzz with plans for more focused research and potential interstellar missions.{% endpromptrange %}
 Answer:"""
-    prompt_with_metadata = PromptTemplate(prompt_template_str).to_prompt_with_metadata(
+    prompt_with_metadata = PromptTemplate(prompt_template_str).to_rich_prompt(
         answer=answer
     )

     input = TextHighlightInput(
-        prompt_with_metadata=prompt_with_metadata,
+        rich_prompt=prompt_with_metadata,
         target=f" {answer}",
         model="luminous-base",
         focus_ranges=frozenset(["no_content"]),
@@ -69,12 +69,12 @@ def test_text_highlight_with_only_one_sentence(text_highlight: TextHighlight) ->
     prompt_template_str = """What is the Latin name of the brown bear?
 The answer is Ursus Arctos.{% promptrange r1 %} Explanation should not highlight anything.{% endpromptrange %}
 Answer:"""
     template = PromptTemplate(prompt_template_str)
-    prompt_with_metadata = template.to_prompt_with_metadata()
+    prompt_with_metadata = template.to_rich_prompt()
     completion = " Ursus Arctos"
     model = "luminous-base"

     input = TextHighlightInput(
-        prompt_with_metadata=prompt_with_metadata,
+        rich_prompt=prompt_with_metadata,
         target=completion,
         model=model,
         focus_ranges=frozenset({"r1"}),
@@ -93,14 +93,14 @@ def test_text_highlight_with_image_prompt(
 {% endpromptrange %}
 Answer:"""
     template = PromptTemplate(prompt_template_str)
-    prompt_with_metadata = template.to_prompt_with_metadata(
+    prompt_with_metadata = template.to_rich_prompt(
         image=template.placeholder(prompt_image)
     )
     completion = " The latin name of the brown bear is Ursus arctos."
     model = "luminous-base"

     input = TextHighlightInput(
-        prompt_with_metadata=prompt_with_metadata, target=completion, model=model
+        rich_prompt=prompt_with_metadata, target=completion, model=model
     )
     output = text_highlight.run(input, NoOpTracer())

@@ -116,14 +116,14 @@ def test_text_highlight_without_range(
 Here is an image, just for LOLs:
 {{image}}
 Answer:"""
     template = PromptTemplate(prompt_template_str)
-    prompt_with_metadata = template.to_prompt_with_metadata(
+    prompt_with_metadata = template.to_rich_prompt(
         image=template.placeholder(prompt_image)
     )
     completion = " The latin name of the brown bear is Ursus arctos."
     model = "luminous-base"

     input = TextHighlightInput(
-        prompt_with_metadata=prompt_with_metadata, target=completion, model=model
+        rich_prompt=prompt_with_metadata, target=completion, model=model
     )
     output = text_highlight.run(input, NoOpTracer())

@@ -141,7 +141,7 @@ def test_text_highlight_without_focus_range_highlights_entire_prompt(
 The international community is abuzz with plans for more focused research and potential interstellar missions.{% endpromptrange %}
 Answer:"""
     template = PromptTemplate(prompt_template_str)
-    prompt_with_metadata = template.to_prompt_with_metadata(
+    prompt_with_metadata = template.to_rich_prompt(
         image=template.placeholder(prompt_image)
     )
     answer = " Extreme conditions."
     model = "luminous-base"
     focus_ranges: frozenset[str] = frozenset()  # empty

     input = TextHighlightInput(
-        prompt_with_metadata=prompt_with_metadata,
+        rich_prompt=prompt_with_metadata,
         target=answer,
         model=model,
         focus_ranges=focus_ranges,
@@ -172,13 +172,13 @@ def test_text_highlight_with_unknown_range_raises(
 The international community is abuzz with plans for more focused research and potential interstellar missions.{% endpromptrange %}
 Answer:"""
     template = PromptTemplate(prompt_template_str)
-    prompt_with_metadata = template.to_prompt_with_metadata()
+    prompt_with_metadata = template.to_rich_prompt()
     answer = " Extreme conditions."
     model = "luminous-base"

     unknown_range_name = "bla"
     input = TextHighlightInput(
-        prompt_with_metadata=prompt_with_metadata,
+        rich_prompt=prompt_with_metadata,
         target=answer,
         model=model,
         focus_ranges=frozenset([unknown_range_name]),
diff --git a/tests/evaluation/test_instruct_comparison_argilla_evaluator.py b/tests/evaluation/test_instruct_comparison_argilla_evaluator.py
index 4308253b1..aa88f0bc4 100644
--- a/tests/evaluation/test_instruct_comparison_argilla_evaluator.py
+++ b/tests/evaluation/test_instruct_comparison_argilla_evaluator.py
@@ -3,7 +3,7 @@
 from typing import Iterable, Sequence
 from uuid import uuid4

-from aleph_alpha_client import CompletionResponse, Prompt
+from aleph_alpha_client import CompletionResponse
 from aleph_alpha_client.completion import CompletionResult
 from faker import Faker
 from pytest import fixture
@@ -16,7 +16,7 @@
     RecordData,
 )
 from intelligence_layer.core.complete import InstructInput, PromptOutput
-from intelligence_layer.core.prompt_template import PromptWithMetadata
+from intelligence_layer.core.prompt_template import RichPrompt
 from intelligence_layer.core.tracer import utc_now
 from intelligence_layer.evaluation import (
     ArgillaEvaluationRepository,
@@ -107,7 +107,7 @@ def any_instruct_output() -> PromptOutput:
             num_tokens_generated=0,
             num_tokens_prompt_total=0,
         ),
-        prompt_with_metadata=PromptWithMetadata(prompt=Prompt([]), ranges={}),
+        rich_prompt=RichPrompt(items=[], ranges={}),
     )