removed PromptWithMetadata in favor of RichPrompt which extends Prompt
pitneitemeier committed Feb 20, 2024
1 parent 9341d93 commit 9c66971
Showing 11 changed files with 68 additions and 91 deletions.
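The upshot for callers: `RichPrompt` subclasses `Prompt` and carries the range metadata itself, so the old wrapper-unpacking step disappears. A minimal before/after sketch (the template string and parameters are illustrative, assuming the library's `promptrange` liquid tag):

from intelligence_layer.core import PromptTemplate

template = PromptTemplate(
    "{% promptrange instruction %}{{instruction}}{% endpromptrange %}"
)

# Before this commit: a wrapper object, with the actual Prompt nested inside.
# with_metadata = template.to_prompt_with_metadata(instruction="Summarize.")
# prompt, ranges = with_metadata.prompt, with_metadata.ranges

# After this commit: one object that is both the Prompt and its metadata.
rich_prompt = template.to_rich_prompt(instruction="Summarize.")
rich_prompt.items   # prompt modalities, usable wherever a Prompt is expected
rich_prompt.ranges  # {"instruction": [...]}, e.g. for downstream TextHighlight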
2 changes: 1 addition & 1 deletion src/examples/classification.ipynb
@@ -443,7 +443,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.12"
+   "version": "3.11.4"
   }
  },
  "nbformat": 4,
2 changes: 1 addition & 1 deletion src/intelligence_layer/core/__init__.py
@@ -33,7 +33,7 @@
 from .prompt_template import PromptItemCursor as PromptItemCursor
 from .prompt_template import PromptRange as PromptRange
 from .prompt_template import PromptTemplate as PromptTemplate
-from .prompt_template import PromptWithMetadata as PromptWithMetadata
+from .prompt_template import RichPrompt as RichPrompt
 from .task import Input as Input
 from .task import Output as Output
26 changes: 9 additions & 17 deletions src/intelligence_layer/core/complete.py
@@ -6,7 +6,7 @@
 from intelligence_layer.connectors.limited_concurrency_client import (
     AlephAlphaClientProtocol,
 )
-from intelligence_layer.core.prompt_template import PromptTemplate, PromptWithMetadata
+from intelligence_layer.core.prompt_template import PromptTemplate, RichPrompt
 from intelligence_layer.core.task import Task
 from intelligence_layer.core.tracer import TaskSpan
 
@@ -90,14 +90,14 @@ class PromptOutput(CompleteOutput):
     Attributes:
         response: The generated response to the instruction.
-        prompt_with_metadata: To handle the instruction, a `PromptTemplate` is used.
+        rich_prompt: To handle the instruction, a `PromptTemplate` is used.
             The template defines two `PromptRange` instances:
             - "instruction": covering the instruction text.
             - "input": covering the input text.
             These can for example be used for downstream `TextHighlight` tasks.
     """
 
-    prompt_with_metadata: PromptWithMetadata
+    rich_prompt: RichPrompt
 
 
 class Instruct(Task[InstructInput, PromptOutput]):
@@ -147,22 +147,18 @@ def __init__(self, client: AlephAlphaClientProtocol, model: str) -> None:
         self._model = model
 
     def do_run(self, input: InstructInput, task_span: TaskSpan) -> PromptOutput:
-        prompt_with_metadata = PromptTemplate(
-            self.INSTRUCTION_PROMPT_TEMPLATE
-        ).to_prompt_with_metadata(
+        prompt = PromptTemplate(self.INSTRUCTION_PROMPT_TEMPLATE).to_rich_prompt(
             input=input.input,
             instruction=input.instruction,
             response_prefix=input.response_prefix,
         )
         response = self._complete(
-            prompt_with_metadata.prompt,
+            prompt,
             input.maximum_response_tokens,
             self._model,
             task_span,
         )
-        return PromptOutput(
-            response=response, prompt_with_metadata=prompt_with_metadata
-        )
+        return PromptOutput(response=response, rich_prompt=prompt)
 
     def _complete(
         self, prompt: Prompt, maximum_tokens: int, model: str, task_span: TaskSpan
@@ -274,9 +270,7 @@ def __init__(self, client: AlephAlphaClientProtocol, model: str) -> None:
         self._model = model
 
     def do_run(self, input: FewShotInput, task_span: TaskSpan) -> PromptOutput:
-        prompt_with_metadata = PromptTemplate(
-            self.FEW_SHOT_PROMPT_TEMPLATE
-        ).to_prompt_with_metadata(
+        prompt = PromptTemplate(self.FEW_SHOT_PROMPT_TEMPLATE).to_rich_prompt(
             instruction=input.few_shot_config.instruction,
             input=input.input,
             few_shot_examples=[
@@ -286,15 +280,13 @@ def do_run(self, input: FewShotInput, task_span: TaskSpan) -> PromptOutput:
             response_prefix=input.few_shot_config.response_prefix,
         )
         response = self._complete(
-            prompt_with_metadata.prompt,
+            prompt,
             input.maximum_response_tokens,
             input.few_shot_config.additional_stop_sequences,
             self._model,
             task_span,
         )
-        return PromptOutput(
-            response=response, prompt_with_metadata=prompt_with_metadata
-        )
+        return PromptOutput(response=response, rich_prompt=prompt)
 
     def _complete(
         self,
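For `Instruct` and `FewShot` consumers, the only visible change is that `PromptOutput` exposes `rich_prompt` where it previously exposed `prompt_with_metadata`. A sketch of the updated access pattern (client token, model name, and the `InstructInput` defaults are assumptions, not taken from this diff):

from aleph_alpha_client import Client
from intelligence_layer.core.complete import Instruct, InstructInput
from intelligence_layer.core.tracer import NoOpTracer

client = Client(token="AA_TOKEN")  # placeholder credentials
instruct = Instruct(client, model="luminous-base-control")  # illustrative model
output = instruct.run(
    InstructInput(instruction="Answer in one word.", input="Capital of Germany?"),
    NoOpTracer(),
)

output.rich_prompt         # was: output.prompt_with_metadata.prompt
output.rich_prompt.ranges  # was: output.prompt_with_metadata.ranges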
2 changes: 1 addition & 1 deletion src/intelligence_layer/core/echo.py
@@ -88,7 +88,7 @@ def do_run(self, input: EchoInput, task_span: TaskSpan) -> EchoOutput:
             input.expected_completion, input.model
         )
         prompt_template = PromptTemplate(self.PROMPT_TEMPLATE_STR)
-        prompt = prompt_template.to_prompt(
+        prompt = prompt_template.to_rich_prompt(
             prompt=prompt_template.embed_prompt(input.prompt),
             expected_completion=prompt_template.placeholder(
                 Tokens.from_token_ids(
17 changes: 4 additions & 13 deletions src/intelligence_layer/core/prompt_template.py
@@ -76,16 +76,14 @@ class PromptRange:
     end: Cursor
 
 
-@dataclass(frozen=True)
-class PromptWithMetadata:
+@dataclass
+class RichPrompt(Prompt):
     """The `Prompt` along with some metadata generated when a `PromptTemplate` is turned into a `Prompt`.
 
     Args:
-        prompt: The actual `Prompt`.
         ranges: A mapping of range name to a `Sequence` of corresponding `PromptRange` instances.
     """
 
-    prompt: Prompt
     ranges: Mapping[str, Sequence[PromptRange]]
 
 
@@ -278,7 +276,7 @@ def embed_prompt(self, prompt: Prompt) -> str:
             last_item = item
         return prompt_text
 
-    def to_prompt_with_metadata(self, **kwargs: Any) -> PromptWithMetadata:
+    def to_rich_prompt(self, **kwargs: Any) -> RichPrompt:
         """Creates a `Prompt` along with metadata from the template string and the given parameters.
 
         Currently the only metadata returned is information about ranges that are marked in the template.
@@ -303,17 +301,10 @@ def to_prompt_with_metadata(self, **kwargs: Any) -> PromptWithMetadata:
             placeholder_indices, context.placeholder_range_names(), liquid_prompt
         )
 
-        result = PromptWithMetadata(Prompt(modalities), placeholder_ranges)
+        result = RichPrompt(modalities, placeholder_ranges)
         self._reset_placeholder_state()
         return result
 
-    def to_prompt(self, **kwargs: Any) -> Prompt:
-        """Creates a `Prompt` from the template string and the given parameters.
-
-        Provided parameters are passed to `liquid.Template.render`.
-        """
-        return self.to_prompt_with_metadata(**kwargs).prompt
-
     def _reset_placeholder_state(self) -> None:
         self._prompt_item_placeholders = {}
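Two details of this dataclass change are easy to miss: `frozen=True` is dropped, presumably because Python does not allow a frozen dataclass to inherit from a non-frozen one (and `aleph_alpha_client`'s `Prompt` is a plain dataclass), and construction now passes the modalities directly instead of wrapping them in a `Prompt` first. A small sketch of what the subclassing buys:

from aleph_alpha_client import Prompt, Text

from intelligence_layer.core.prompt_template import RichPrompt

rich = RichPrompt(items=[Text.from_text("Hello")], ranges={})

# A RichPrompt *is* a Prompt, so it can be handed to any API that expects one;
# the range metadata rides along on the same object.
assert isinstance(rich, Prompt)
assert rich.ranges == {}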
16 changes: 7 additions & 9 deletions src/intelligence_layer/core/text_highlight.py
@@ -18,7 +18,7 @@
 from intelligence_layer.core.prompt_template import (
     Cursor,
     PromptRange,
-    PromptWithMetadata,
+    RichPrompt,
     TextCursor,
 )
 from intelligence_layer.core.task import Task
@@ -38,7 +38,7 @@ class TextHighlightInput(BaseModel):
         If this set is empty highlights of the entire prompt are returned.
     """
 
-    prompt_with_metadata: PromptWithMetadata
+    rich_prompt: RichPrompt
     target: str
     model: str
     focus_ranges: frozenset[str] = frozenset()
@@ -70,7 +70,7 @@ class TextHighlight(Task[TextHighlightInput, TextHighlightOutput]):
     """Generates text highlights given a prompt and completion.
 
     For a given prompt and target (completion), extracts the parts of the prompt responsible for generation.
-    A range can be provided in the input 'PromptWithMetadata' via use of the liquid language (see the example).
+    A range can be provided via use of the liquid language (see the example).
     In this case, the highlights will only refer to text within this range.
 
     Args:
@@ -117,19 +117,19 @@ def do_run(
     ) -> TextHighlightOutput:
         self._raise_on_invalid_focus_range(input)
         explanation = self._explain(
-            prompt=input.prompt_with_metadata.prompt,
+            prompt=input.rich_prompt,
             target=input.target,
             model=input.model,
             task_span=task_span,
         )
         prompt_ranges = self._flatten_prompt_ranges(
             range
-            for name, range in input.prompt_with_metadata.ranges.items()
+            for name, range in input.rich_prompt.ranges.items()
             if name in input.focus_ranges
         )
         text_prompt_item_explanations_and_indices = (
             self._extract_text_prompt_item_explanations_and_item_index(
-                input.prompt_with_metadata.prompt, explanation
+                input.rich_prompt, explanation
             )
         )
         highlights = self._to_highlights(
@@ -140,9 +140,7 @@
         return TextHighlightOutput(highlights=highlights)
 
     def _raise_on_invalid_focus_range(self, input: TextHighlightInput) -> None:
-        unknown_focus_ranges = input.focus_ranges - set(
-            input.prompt_with_metadata.ranges.keys()
-        )
+        unknown_focus_ranges = input.focus_ranges - set(input.rich_prompt.ranges.keys())
         if unknown_focus_ranges:
             raise ValueError(f"Unknown focus ranges: {', '.join(unknown_focus_ranges)}")
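For `TextHighlight` callers the migration is a keyword rename: `prompt_with_metadata=` becomes `rich_prompt=`, and the prompt is no longer unwrapped internally. A sketch of the updated input (template text and model name are illustrative):

from intelligence_layer.core.prompt_template import PromptTemplate
from intelligence_layer.core.text_highlight import TextHighlightInput

template = PromptTemplate(
    "Answer the question.\n{% promptrange input %}{{text}}{% endpromptrange %}"
)
rich_prompt = template.to_rich_prompt(text="Paris is the capital of France.")

highlight_input = TextHighlightInput(
    rich_prompt=rich_prompt,  # was: prompt_with_metadata=...
    target=" Paris",
    model="luminous-base-control",  # illustrative model name
    focus_ranges=frozenset({"input"}),
)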
8 changes: 4 additions & 4 deletions src/intelligence_layer/use_cases/qa/single_chunk_qa.py
@@ -9,7 +9,7 @@
 from intelligence_layer.core.chunk import Chunk
 from intelligence_layer.core.complete import Instruct, InstructInput, PromptOutput
 from intelligence_layer.core.detect_language import Language, language_config
-from intelligence_layer.core.prompt_template import PromptWithMetadata
+from intelligence_layer.core.prompt_template import RichPrompt
 from intelligence_layer.core.task import Task
 from intelligence_layer.core.text_highlight import TextHighlight, TextHighlightInput
 from intelligence_layer.core.tracer import TaskSpan
@@ -132,7 +132,7 @@ def do_run(
         answer = self._no_answer_to_none(output.completion.strip())
         highlights = (
             self._get_highlights(
-                output.prompt_with_metadata,
+                output.rich_prompt,
                 output.completion,
                 task_span,
             )
@@ -158,12 +158,12 @@ def _generate_answer(
 
     def _get_highlights(
         self,
-        prompt_with_metadata: PromptWithMetadata,
+        prompt_with_metadata: RichPrompt,
         completion: str,
         task_span: TaskSpan,
     ) -> Sequence[str]:
         highlight_input = TextHighlightInput(
-            prompt_with_metadata=prompt_with_metadata,
+            rich_prompt=prompt_with_metadata,
             target=completion,
             model=self._model,
             focus_ranges=frozenset({"input"}),
2 changes: 1 addition & 1 deletion tests/core/test_complete.py
@@ -33,7 +33,7 @@ def test_instruct_without_input(instruct: Instruct, no_op_tracer: NoOpTracer) ->
     output = instruct.run(input, no_op_tracer)
 
     assert "Berlin" in output.completion
-    prompt_text_item = output.prompt_with_metadata.prompt.items[0]
+    prompt_text_item = output.rich_prompt.items[0]
     assert isinstance(prompt_text_item, Text)
     assert "Input" not in prompt_text_item.text