diff --git a/.buildinfo b/.buildinfo new file mode 100644 index 000000000..20014fe1c --- /dev/null +++ b/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 04c1fabb6236dee285d067562705f386 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/_modules/index.html b/_modules/index.html new file mode 100644 index 000000000..bcb425be5 --- /dev/null +++ b/_modules/index.html @@ -0,0 +1,119 @@ + + + + + + + Overview: module code — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+ +
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/connectors/document_index.html b/_modules/intelligence_layer/connectors/document_index.html new file mode 100644 index 000000000..84142628f --- /dev/null +++ b/_modules/intelligence_layer/connectors/document_index.html @@ -0,0 +1,236 @@ + + + + + + + intelligence_layer.connectors.document_index — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.connectors.document_index

+import json
+from typing import Any
+
+import requests
+
+
+
+[docs] +class DocumentIndex: + """Client for the Document Index, allowing documents to be managed and searched. + + The Document Index is a tool for managing collections of documents, enabling operations such as creation, deletion, listing, and searching. + Documents can be stored either in the cloud or in a local deployment. + + Args: + token: A valid token for the Document Index API. + base_document_index_url: The URL of the Document Index API. + + Example: + >>> document_index = DocumentIndex(os.getenv("AA_TOKEN")) + >>> document_index.create_collection(namespace="my_namespace", collection="germany_facts_collection") + >>> document_index.add_document( + >>> namespace="my_namespace", + >>> collection="germany_facts_collection", + >>> name="Fun facts about Germany", + >>> content="Germany is a country located in ..." + >>> ) + >>> documents = document_index.search( + >>> namespace="my_namespace", + >>> collection="germany_facts_collection", + >>> query="What is the capital of Germany?", + >>> max_results=4, + >>> min_score=0.5 + >>> ) + """ + + def __init__( + self, + token: str, + base_document_index_url: str = "https://knowledge.aleph-alpha.com", + ) -> None: + self._base_document_index_url = base_document_index_url + self.headers = { + "Content-Type": "application/json", + "Accept": "application/json", + "Authorization": f"Bearer {token}", + } +
+[docs] + def create_collection(self, namespace: str, collection: str) -> None: + url = f"{self._base_document_index_url}/collections/{namespace}/{collection}" + response = requests.put(url, headers=self.headers) + response.raise_for_status()
+ + +
+[docs] + def delete_collection(self, namespace: str, collection: str) -> None: + url = f"{self._base_document_index_url}/collections/{namespace}/{collection}" + response = requests.delete(url, headers=self.headers) + response.raise_for_status()
+ + +
+[docs] + def add_document( + self, + namespace: str, + collection: str, + name: str, + content: str, + ) -> None: + url = f"{self._base_document_index_url}/collections/{namespace}/{collection}/docs/{name}" + data = { + "schema_version": "V1", + "contents": [{"modality": "text", "text": content}], + } + response = requests.put(url, data=json.dumps(data), headers=self.headers) + response.raise_for_status()
+ + +
+[docs] + def delete_document(self, namespace: str, collection: str, name: str) -> None: + url = f"{self._base_document_index_url}/collections/{namespace}/{collection}/docs/{name}" + response = requests.delete(url, headers=self.headers) + response.raise_for_status()
+ + +
+[docs] + def get_document( + self, namespace: str, collection: str, name: str, get_chunks: bool = False + ) -> Any: + if not get_chunks: + url = f"{self._base_document_index_url}/collections/{namespace}/{collection}/docs/{name}" + else: + url = f"{self._base_document_index_url}/collections/{namespace}/{collection}/docs/{name}/chunks" + response = requests.get(url, headers=self.headers) + response.raise_for_status() + return response.json()
+ + +
+[docs] + def list_documents(self, namespace: str, collection: str) -> Any: + url = ( + f"{self._base_document_index_url}/collections/{namespace}/{collection}/docs" + ) + response = requests.get(url, headers=self.headers) + response.raise_for_status() + return response.json()
+ + +
+[docs] + def search( + self, + namespace: str, + collection: str, + query: str, + max_results: int, + min_score: float, + ) -> Any: + url = f"{self._base_document_index_url}/collections/{namespace}/{collection}/search" + data = { + "query": [{"modality": "text", "text": query}], + "max_results": max_results, + "min_score": min_score, + "filter": [{"with": [{"modality": "text"}]}], + } + response = requests.post(url, data=json.dumps(data), headers=self.headers) + response.raise_for_status() + return response.json()
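+    # A minimal usage sketch for `search` (assumes a valid token in the
+    # AA_TOKEN environment variable and an existing collection; all names
+    # here are illustrative):
+    #
+    #     import os
+    #     document_index = DocumentIndex(os.getenv("AA_TOKEN"))
+    #     results = document_index.search(
+    #         namespace="my_namespace",
+    #         collection="germany_facts_collection",
+    #         query="What is the capital of Germany?",
+    #         max_results=4,
+    #         min_score=0.5,
+    #     )
+    #     # Each result carries a score and the matching text section, as
+    #     # consumed by `DocumentIndexRetriever` elsewhere in this package:
+    #     for result in results:
+    #         print(result["score"], result["section"][0]["text"])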
+
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/connectors/retrievers/base_retriever.html b/_modules/intelligence_layer/connectors/retrievers/base_retriever.html new file mode 100644 index 000000000..3c631dd23 --- /dev/null +++ b/_modules/intelligence_layer/connectors/retrievers/base_retriever.html @@ -0,0 +1,153 @@ + + + + + + + intelligence_layer.connectors.retrievers.base_retriever — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.connectors.retrievers.base_retriever

+from abc import ABC, abstractmethod
+from typing import Any, Sequence
+
+from pydantic import BaseModel
+
+
+
+[docs] +class Document(BaseModel): + """Document abstraction, specifically for retrieval use cases. + + Attributes: + text: The document's text. + metadata: Any json-serializable object. + """ + + text: str + metadata: Any = None
+ + + +
+[docs] +class SearchResult(BaseModel): + """Contains a text alongside its search score. + + Attributes: + score: The similarity score between the text and the query that was searched with. + Will be between 0 and 1, where 0 means no similarity and 1 perfect similarity. + document: The document found by search. + """ + + score: float + document: Document
+ + + +
+[docs] +class BaseRetriever(ABC): + """General interface for any retriever. + + Retrievers are used to find texts given a user query. + Each Retriever implementation owns its own logic for retrieval. + For comparison purposes, we assume scores in the `SearchResult`s to be between 0 and 1. + """ + +
+[docs] + @abstractmethod + def get_relevant_documents_with_scores(self, query: str) -> Sequence[SearchResult]: + pass
+
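+# A minimal sketch of a custom retriever against this interface (a toy
+# keyword-overlap scorer, not part of the library):
+#
+#     class KeywordRetriever(BaseRetriever):
+#         def __init__(self, documents: Sequence[Document]) -> None:
+#             self._documents = documents
+#
+#         def get_relevant_documents_with_scores(
+#             self, query: str
+#         ) -> Sequence[SearchResult]:
+#             # Score by the fraction of query words found in each document,
+#             # which keeps scores in the expected 0-1 range.
+#             words = set(query.lower().split())
+#             results = [
+#                 SearchResult(
+#                     score=len(words & set(doc.text.lower().split()))
+#                     / max(len(words), 1),
+#                     document=doc,
+#                 )
+#                 for doc in self._documents
+#             ]
+#             return sorted(results, key=lambda r: r.score, reverse=True)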
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/connectors/retrievers/document_index_retriever.html b/_modules/intelligence_layer/connectors/retrievers/document_index_retriever.html new file mode 100644 index 000000000..c91cc59bc --- /dev/null +++ b/_modules/intelligence_layer/connectors/retrievers/document_index_retriever.html @@ -0,0 +1,161 @@ + + + + + + + intelligence_layer.connectors.retrievers.document_index_retriever — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.connectors.retrievers.document_index_retriever

+from typing import Sequence
+
+from intelligence_layer.connectors.document_index import DocumentIndex
+from intelligence_layer.connectors.retrievers.base_retriever import (
+    BaseRetriever,
+    Document,
+    SearchResult,
+)
+
+
+
+[docs] +class DocumentIndexRetriever(BaseRetriever): + """Search through documents within collections in the `DocumentIndex`. + + We initialize this retriever with a namespace and a collection name, and can then find the documents in that collection + that are most semantically similar to our query. + + Args: + document_index: Client offering functionality for search. + namespace: The namespace within the `DocumentIndex` where all collections are stored. + collection: The collection within the namespace that holds the desired documents. + k: The (top) number of documents to be returned by search. + threshold: The minimum value of cosine similarity between the query vector and the document vector. + + Example: + >>> document_index = DocumentIndex(os.getenv("AA_TOKEN")) + >>> retriever = DocumentIndexRetriever(document_index, "my_namespace", "airplane_facts_collection", 3) + >>> query = "Who invented the airplane?" + >>> documents = retriever.get_relevant_documents_with_scores(query) + """ + + def __init__( + self, + document_index: DocumentIndex, + namespace: str, + collection: str, + k: int, + threshold: float = 0.5, + ) -> None: + self._document_index = document_index + self._namespace = namespace + self._collection = collection + self._k = k + self._threshold = threshold +
+[docs] + def get_relevant_documents_with_scores(self, query: str) -> Sequence[SearchResult]: + response = self._document_index.search( + self._namespace, self._collection, query, self._k, self._threshold + ) + relevant_chunks = [ + SearchResult( + score=result["score"], + document=Document(text=result["section"][0]["text"], metadata=None), + ) + for result in response + ] + return relevant_chunks
+
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/connectors/retrievers/in_memory_retriever.html b/_modules/intelligence_layer/connectors/retrievers/in_memory_retriever.html new file mode 100644 index 000000000..b6a7aa8d5 --- /dev/null +++ b/_modules/intelligence_layer/connectors/retrievers/in_memory_retriever.html @@ -0,0 +1,250 @@ + + + + + + + intelligence_layer.connectors.retrievers.in_memory_retriever — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.connectors.retrievers.in_memory_retriever

+from concurrent.futures import ThreadPoolExecutor
+from enum import Enum
+from typing import Sequence
+
+from aleph_alpha_client import (
+    Client,
+    Prompt,
+    SemanticRepresentation,
+    SemanticEmbeddingRequest,
+)
+from qdrant_client import QdrantClient
+from qdrant_client.conversions.common_types import ScoredPoint
+from qdrant_client.http.models import Distance, VectorParams, PointStruct, models
+
+from intelligence_layer.connectors.retrievers.base_retriever import (
+    BaseRetriever,
+    Document,
+    SearchResult,
+)
+
+
+
+[docs] +class RetrieverType(Enum): + """Specify the type of retriever to instantiate. + + Attributes: + ASYMMETRIC: Query is embedded as `Query` and each document as `Document`. + SYMMETRIC: Both query and documents will be embedded as `Symmetric`. + """ + + ASYMMETRIC = (SemanticRepresentation.Query, SemanticRepresentation.Document) + SYMMETRIC = (SemanticRepresentation.Symmetric, SemanticRepresentation.Symmetric)
+ + + +
+[docs] +class InMemoryRetriever(BaseRetriever): + """Search through documents stored in memory using semantic search. + + This retriever uses an in-memory [Qdrant](https://github.com/qdrant/qdrant) vector store to store documents and their asymmetric embeddings. + When run, the given query is embedded and scored against the document embeddings to retrieve the k most similar matches by cosine similarity. + + Args: + client: Aleph Alpha client instance for running model related API calls. + documents: The sequence of documents to be made searchable. + k: The (top) number of documents to be returned by search. + threshold: The minimum value of cosine similarity between the query vector and the document vector. + retriever_type: The type of retriever to be instantiated. + Should be `ASYMMETRIC` for most query-document retrieval use cases; `SYMMETRIC` is optimized + for similar document retrieval. + + Example: + >>> client = Client(os.getenv("AA_TOKEN")) + >>> documents = [Document(text=t) for t in ["I do not like rain.", "Summer is warm.", "We are so back."]] + >>> retriever = InMemoryRetriever(client, documents, k=2) + >>> query = "Do you like summer?" + >>> documents = retriever.get_relevant_documents_with_scores(query) + """ + + MAX_WORKERS = 10 + + def __init__( + self, + client: Client, + documents: Sequence[Document], + k: int, + threshold: float = 0.5, + retriever_type: RetrieverType = RetrieverType.ASYMMETRIC, + ) -> None: + self._client = client + self._search_client = QdrantClient(":memory:") + self._collection_name = "in_memory_collection" + self._k = k + self._threshold = threshold + self._query_representation = retriever_type.value[0] + self._document_representation = retriever_type.value[1] + + self._search_client.recreate_collection( + collection_name=self._collection_name, + vectors_config=VectorParams(size=128, distance=Distance.COSINE), + ) + self._add_texts_to_memory(documents) +
+[docs] + def get_relevant_documents_with_scores(self, query: str) -> Sequence[SearchResult]: + query_embedding = self._embed(query, self._query_representation) + search_result = self._search_client.search( + collection_name=self._collection_name, + query_vector=query_embedding, + score_threshold=self._threshold, + limit=self._k, + ) + return [self._point_to_search_result(point) for point in search_result]
+ + + def _embed(self, text: str, representation: SemanticRepresentation) -> list[float]: + embedding_request = SemanticEmbeddingRequest( + prompt=Prompt.from_text(text), + representation=representation, + compress_to_size=128, + normalize=True, + ) + return self._client.semantic_embed( + request=embedding_request, model="luminous-base" + ).embedding + + @staticmethod + def _point_to_search_result(point: ScoredPoint) -> SearchResult: + assert point.payload + return SearchResult(score=point.score, document=Document(**point.payload)) + + def _add_texts_to_memory(self, documents: Sequence[Document]) -> None: + with ThreadPoolExecutor(max_workers=self.MAX_WORKERS) as executor: + embeddings = list( + executor.map( + lambda c: self._embed(c.text, self._document_representation), + documents, + ) + ) + self._search_client.upsert( + collection_name=self._collection_name, + wait=True, + points=[ + PointStruct( + id=idx, vector=text_embedding, payload=document.model_dump() + ) + for idx, (text_embedding, document) in enumerate( + zip(embeddings, documents) + ) + ], + ) + +
+[docs] + def get_filtered_documents_with_scores( + self, query: str, filter: models.Filter + ) -> Sequence[SearchResult]: + """Specific method for `InMemoryRetriever` to support filtering search results.""" + query_embedding = self._embed(query, self._query_representation) + search_result = self._search_client.search( + collection_name=self._collection_name, + query_vector=query_embedding, + limit=self._k, + query_filter=filter, + ) + return [self._point_to_search_result(point) for point in search_result]
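+    # A usage sketch for filtered search (assumes the documents were created
+    # with dict metadata, e.g. Document(text="...", metadata={"topic": "weather"}),
+    # so the payload key below is "metadata.topic"):
+    #
+    #     topic_filter = models.Filter(
+    #         must=[
+    #             models.FieldCondition(
+    #                 key="metadata.topic",
+    #                 match=models.MatchValue(value="weather"),
+    #             )
+    #         ]
+    #     )
+    #     results = retriever.get_filtered_documents_with_scores(
+    #         "Do you like summer?", topic_filter
+    #     )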
+
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/core/complete.html b/_modules/intelligence_layer/core/complete.html new file mode 100644 index 000000000..18c6eafc8 --- /dev/null +++ b/_modules/intelligence_layer/core/complete.html @@ -0,0 +1,288 @@ + + + + + + + intelligence_layer.core.complete — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.core.complete

+from typing import Optional
+
+from aleph_alpha_client import Client, CompletionRequest, CompletionResponse, Prompt
+from pydantic import BaseModel
+
+from intelligence_layer.core.prompt_template import PromptTemplate, PromptWithMetadata
+from intelligence_layer.core.task import Task
+from intelligence_layer.core.logger import DebugLogger
+
+
+
+[docs] +class CompleteInput(BaseModel): + """The input for a `Complete` task. + + Attributes: + request: Aleph Alpha `Client`'s `CompletionRequest`. This gives fine-grained control + over all completion parameters that are supported by Aleph Alpha's inference API. + model: A valid Aleph Alpha model name. + """ + + request: CompletionRequest + model: str
+ + + +
+[docs] +class CompleteOutput(BaseModel): + """The output of a `Complete` task. + + Attributes: + response: Aleph Alpha `Client`'s `CompletionResponse` containing all details + provided by Aleph Alpha's inference API. + """ + + response: CompletionResponse + + @property + def completion(self) -> str: + return self.response.completions[0].completion or ""
+ + + +
+[docs] +class Complete(Task[CompleteInput, CompleteOutput]): + """Performs a completion request with access to all possible request parameters. + + Only use this task if none of the higher-level tasks defined below works for + you, i.e. if your completion request does not fit the use cases the higher-level tasks represent, or + if you need to control request parameters that they do not expose. + + Args: + client: Aleph Alpha client instance for running model related API calls. + """ + + def __init__(self, client: Client) -> None: + super().__init__() + self._client = client +
+[docs] + def run(self, input: CompleteInput, logger: DebugLogger) -> CompleteOutput: + response = self._client.complete( + input.request, + model=input.model, + ) + return CompleteOutput(response=response)
+
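+# A usage sketch for `Complete` (assumes AA_TOKEN holds a valid API token;
+# `NoOpDebugLogger` is defined in `intelligence_layer.core.logger`):
+#
+#     import os
+#     client = Client(os.getenv("AA_TOKEN"))
+#     task = Complete(client)
+#     request = CompletionRequest(
+#         prompt=Prompt.from_text("An apple a day"), maximum_tokens=16
+#     )
+#     output = task.run(
+#         CompleteInput(request=request, model="luminous-base"), NoOpDebugLogger()
+#     )
+#     print(output.completion)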
+ + + +
+[docs] +class InstructInput(BaseModel): + """The input for an `Instruct`. + + Attributes: + instruction: A textual instruction for the model. + Could be a directive to answer a question or to translate something. + input: The text input for the instruction, e.g. a text to be translated. + model: The name of the model that should handle the instruction. + Certain models are optimized for handling such instruction tasks. + Typically their name contains 'control', e.g. 'luminous-extended-control'. + response_prefix: A string that is provided to the LLM as a prefix of the response. + This can steer the model completion. + maximum_response_tokens: The maximum number of tokens to be generated in the answer. + The default corresponds to roughly one short paragraph. + """ + + instruction: str + input: Optional[str] + model: str + response_prefix: str = "" + maximum_response_tokens: int = 64
+ + + +
+[docs] +class InstructOutput(BaseModel): + """The output of an `Instruct`. + + Attributes: + response: The generated response to the instruction. + prompt_with_metadata: To handle the instruction, a `PromptTemplate` is used. + The template defines two `PromptRange`s: + - "instruction": covering the instruction text as provided in the `InstructInput`. + - "input": covering the input text as provided in the `InstructInput`. + These can, for example, be used for downstream `TextHighlight` tasks. + """ + + response: str + prompt_with_metadata: PromptWithMetadata
+ + + +
+[docs] +class Instruct(Task[InstructInput, InstructOutput]): + """Runs zero-shot instruction completions on a model. + + Can be used for various types of instructions an LLM could handle, like QA, summarization, + translation and more. + + Args: + client: Aleph Alpha client instance for running model related API calls. + + Attributes: + INSTRUCTION_PROMPT_TEMPLATE: The prompt-template used to build the actual `Prompt` sent + to the inference API. + + Example: + >>> client = Client(os.getenv("AA_TOKEN")) + >>> task = Instruct(client) + >>> input = InstructInput( + >>> instruction="Translate the following text to German.", + >>> input="An apple a day keeps the doctor away." + >>> ) + >>> logger = InMemoryDebugLogger(name="Instruct") + >>> output = task.run(input, logger) + >>> print(output.response) + Ein Apfel am Tag hält den Arzt fern. + """ + + INSTRUCTION_PROMPT_TEMPLATE = """### Instruction:
+{% promptrange instruction %}{{instruction}}{% endpromptrange %}
+{% if input %}
+### Input:
+{% promptrange input %}{{input}}{% endpromptrange %}
+{% endif %}
+### Response:{{response_prefix}}""" + + def __init__(self, client: Client) -> None: + super().__init__() + self._client = client + self._completion = Complete(client)
+[docs] + def run(self, input: InstructInput, logger: DebugLogger) -> InstructOutput: + prompt_with_metadata = PromptTemplate( + self.INSTRUCTION_PROMPT_TEMPLATE + ).to_prompt_with_metadata( + input=input.input, + instruction=input.instruction, + response_prefix=input.response_prefix, + ) + completion = self._complete( + prompt_with_metadata.prompt, + input.maximum_response_tokens, + input.model, + logger, + ) + return InstructOutput( + response=completion, prompt_with_metadata=prompt_with_metadata + )
+ + + def _complete( + self, prompt: Prompt, maximum_tokens: int, model: str, logger: DebugLogger + ) -> str: + request = CompletionRequest(prompt, maximum_tokens=maximum_tokens) + return self._completion.run( + CompleteInput(request=request, model=model), + logger, + ).completion
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/core/echo.html b/_modules/intelligence_layer/core/echo.html new file mode 100644 index 000000000..473bd0058 --- /dev/null +++ b/_modules/intelligence_layer/core/echo.html @@ -0,0 +1,252 @@ + + + + + + + intelligence_layer.core.echo — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.core.echo

+from typing import Sequence
+
+from aleph_alpha_client import Client, CompletionRequest, Prompt, Tokens
+from pydantic import BaseModel
+import tokenizers  # type: ignore
+
+from intelligence_layer.core.complete import Complete, CompleteInput
+from intelligence_layer.core.logger import DebugLogger
+from intelligence_layer.core.prompt_template import PromptTemplate
+from intelligence_layer.core.task import LogProb, Probability, Task, Token
+
+
+
+[docs] +class TokenWithProb(BaseModel): + token: Token + prob: Probability | LogProb
+ + + +
+[docs] +class EchoInput(BaseModel): + """The input for an `EchoTask`. + + Attributes: + prompt: The input text that serves as the starting point for the LLM. + expected_completion: The desired completion based on the prompt. + The likelihood of each of its tokens will be examined. + model: A valid Aleph Alpha model name. + """ + + prompt: Prompt + expected_completion: str + model: str
+ + + +
+[docs] +class EchoOutput(BaseModel): + """The output of an `EchoTask`. + + Attributes: + tokens_with_log_probs: Every token of the `expected_completion` of the + `EchoInput`, accompanied by the log-probability of it having been generated + in a completion scenario. + """ + + tokens_with_log_probs: Sequence[TokenWithProb]
+ + + +
+[docs] +class EchoTask(Task[EchoInput, EchoOutput]): + """Task that returns probabilities of a completion given a prompt. + + Analyzes the likelihood of generating tokens in the expected completion based on + a given prompt and model. Does not generate any tokens. + + Args: + client: Aleph Alpha client instance for running model related API calls. + + Example: + >>> client = Client(token="AA_TOKEN") + >>> task = EchoTask(client) + >>> input = EchoInput( + >>> prompt=Prompt.from_text("This is a "), + >>> expected_completion="happy text", + >>> model="luminous-base", + >>> ) + >>> logger = InMemoryDebugLogger(name="EchoTask") + >>> output = task.run(input, logger) + >>> print(output.tokens_with_log_probs[0].prob) + -0.6 + """ + + PROMPT_TEMPLATE: PromptTemplate = PromptTemplate( + "{{prompt}}{{expected_completion}}" + ) + + def __init__(self, client: Client) -> None: + super().__init__() + self._client = client + self._completion = Complete(client=client) +
+[docs] + def run(self, input: EchoInput, logger: DebugLogger) -> EchoOutput: + # We tokenize the prompt separately so we don't have an overlap in the tokens. + # If we don't do this, the end of the prompt and expected completion can be merged into unexpected tokens. + expected_completion_tokens = self._tokenize( + input.expected_completion, input.model + ) + prompt = self.PROMPT_TEMPLATE.to_prompt( + prompt=self.PROMPT_TEMPLATE.embed_prompt(input.prompt), + expected_completion=self.PROMPT_TEMPLATE.placeholder( + Tokens.from_token_ids( + [token.token_id for token in expected_completion_tokens] + ) + ), + ) + completion_input = CompleteInput( + request=self._completion_request(prompt=prompt), + model=input.model, + ) + output = self._completion.run(completion_input, logger) + assert output.response.completions[0].log_probs + log_prob_dicts = output.response.completions[0].log_probs[ + -len(expected_completion_tokens) : + ] + tokens_with_prob = [] + for token, log_prob in zip( + expected_completion_tokens, log_prob_dicts, strict=True + ): + assert token.token in log_prob + tokens_with_prob.append( + TokenWithProb( + token=token, + prob=LogProb(log_prob.get(token.token, 0.0) or 0.0), + ) + ) + return EchoOutput(tokens_with_log_probs=tokens_with_prob)
+ + + def _completion_request( + self, + prompt: Prompt, + ) -> CompletionRequest: + return CompletionRequest( + prompt=prompt, + maximum_tokens=0, + log_probs=0, + tokens=True, + echo=True, + ) + + def _tokenize(self, text: str, model: str) -> Sequence[Token]: + # Turns the expected output into list of token ids. Important so that we know how many tokens + # the label is and can retrieve the last N log probs for the label + tokenizer = self._client.tokenizer(model) + assert tokenizer.pre_tokenizer + tokenizer.pre_tokenizer.add_prefix_space = False + encoding: tokenizers.Encoding = tokenizer.encode(text) + return [ + Token( + token=tokenizer.decode([token_id], skip_special_tokens=False), + token_id=token_id, + ) + for token_id in encoding.ids + ]
+ +
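+# Since `prob` holds log-probabilities here, the likelihood of the whole
+# expected completion can be recovered by summing (a sketch, reusing the
+# `output` from the class docstring example):
+#
+#     import math
+#     sequence_log_prob = sum(t.prob for t in output.tokens_with_log_probs)
+#     print(math.exp(sequence_log_prob))  # probability of the full completion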
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/core/evaluator.html b/_modules/intelligence_layer/core/evaluator.html new file mode 100644 index 000000000..464e710df --- /dev/null +++ b/_modules/intelligence_layer/core/evaluator.html @@ -0,0 +1,208 @@ + + + + + + + intelligence_layer.core.evaluator — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.core.evaluator

+from abc import ABC, abstractmethod
+from concurrent.futures import ThreadPoolExecutor
+from typing import Generic, Optional, Sequence, TypeVar
+from uuid import uuid4
+
+from pydantic import BaseModel, Field
+from tqdm import tqdm
+
+from intelligence_layer.core.task import Input
+from intelligence_layer.core.logger import PydanticSerializable, DebugLogger
+
+
+ExpectedOutput = TypeVar("ExpectedOutput", bound=PydanticSerializable)
+Evaluation = TypeVar("Evaluation", bound=PydanticSerializable)
+AggregatedEvaluation = TypeVar("AggregatedEvaluation", bound=PydanticSerializable)
+
+
+
+[docs] +class Example(BaseModel, Generic[Input, ExpectedOutput]): + """Example case used for evaluations. + + Attributes: + input: Input for the task. Has to be the same type as the input of the task used. + expected_output: The expected output from a given example run. + The evaluator will compare the actual output against this. + ident: Identifier for the example, defaults to a uuid. + """ + + input: Input + expected_output: ExpectedOutput + ident: Optional[str] = Field(default_factory=lambda: str(uuid4()))
+ + + +
+[docs] +class Dataset(BaseModel, Generic[Input, ExpectedOutput]): + """A dataset of examples used for evaluation of a task. + + Attributes: + name: A human-readable identifier for the dataset. + examples: The actual examples that a task will be evaluated on. + """ + + name: str + examples: Sequence[Example[Input, ExpectedOutput]]
+ + + +
+[docs] +class Evaluator(ABC, Generic[Input, ExpectedOutput, Evaluation, AggregatedEvaluation]): + """Base evaluator interface. Implementations run the evaluation steps for a specific task. + + Generics: + Input: Interface to be passed to the task that shall be evaluated. + ExpectedOutput: Output that is expected from the task run with the supplied input. + Evaluation: Interface of the metrics that come from the evaluated task. + AggregatedEvaluation: The aggregated results of an evaluation run with a dataset. + + We suggest supplying a `Task` in the `__init__` method and running it in the `evaluate` method. + """ +
+[docs] + @abstractmethod + def evaluate( + self, + input: Input, + logger: DebugLogger, + expected_output: ExpectedOutput, + ) -> Evaluation: + """Executes the evaluation for this use-case.""" + pass
+ + +
+[docs] + def evaluate_dataset( + self, dataset: Dataset[Input, ExpectedOutput], logger: DebugLogger + ) -> AggregatedEvaluation: + """Evaluates an entire dataset in a threaded manner and aggregates the results into an `AggregatedEvaluation`.""" + with ThreadPoolExecutor(max_workers=10) as executor: + evaluations = list( + tqdm( + executor.map( + lambda idx_example: self.evaluate( + idx_example.input, + logger, + idx_example.expected_output, + ), + dataset.examples, + ), + total=len(dataset.examples), + desc="Evaluating", + ) + ) + return self.aggregate(evaluations)
+ + +
+[docs] + @abstractmethod + def aggregate(self, evaluations: Sequence[Evaluation]) -> AggregatedEvaluation: + """`Evaluator`-specific method for aggregating individual `Evaluation`s into a report-like `AggregatedEvaluation`.""" + pass
+
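+# A toy sketch of a concrete evaluator (exact-match accuracy; the names and
+# the wrapped `Task` are illustrative, not part of the library):
+#
+#     class ExactMatchEvaluator(Evaluator[str, str, bool, float]):
+#         def __init__(self, task: Task[str, str]) -> None:
+#             self._task = task
+#
+#         def evaluate(
+#             self, input: str, logger: DebugLogger, expected_output: str
+#         ) -> bool:
+#             return self._task.run(input, logger) == expected_output
+#
+#         def aggregate(self, evaluations: Sequence[bool]) -> float:
+#             # Fraction of examples that matched exactly.
+#             return sum(evaluations) / len(evaluations) if evaluations else 0.0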
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/core/explain.html b/_modules/intelligence_layer/core/explain.html new file mode 100644 index 000000000..41709068a --- /dev/null +++ b/_modules/intelligence_layer/core/explain.html @@ -0,0 +1,159 @@ + + + + + + + intelligence_layer.core.explain — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.core.explain

+from aleph_alpha_client import Client, ExplanationRequest, ExplanationResponse
+from pydantic import BaseModel
+
+from intelligence_layer.core.task import Task
+from intelligence_layer.core.logger import DebugLogger
+
+
+
+[docs] +class ExplainInput(BaseModel): + """The input for an `Explain` task. + + Attributes: + request: Aleph Alpha `Client`'s `ExplanationRequest`. This gives fine-grained control + over all explanation parameters that are supported by Aleph Alpha's inference API. + model: A valid Aleph Alpha model name. + """ + + request: ExplanationRequest + model: str
+ + + +
+[docs] +class ExplainOutput(BaseModel): + """The output of an `Explain` task. + + Attributes: + response: Aleph Alpha `Client`'s `ExplanationResponse` containing all details + provided by Aleph Alpha's inference API. + """ + + response: ExplanationResponse
+ + + +
+[docs] +class Explain(Task[ExplainInput, ExplainOutput]): + """Performs an explanation request with access to all possible request parameters. + + Only use this task if none of the higher-level tasks defined below works for + you, for example if the `TextHighlight` task does not fit your use case. + + Args: + client: Aleph Alpha client instance for running model related API calls. + """ + + def __init__(self, client: Client) -> None: + super().__init__() + self._client = client +
+[docs] + def run(self, input: ExplainInput, logger: DebugLogger) -> ExplainOutput: + response = self._client.explain(input.request, input.model) + return ExplainOutput(response=response)
+
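+# A usage sketch for `Explain` (assumes `ExplanationRequest` is built from a
+# prompt plus the completion target to be explained, and that AA_TOKEN holds
+# a valid API token; `Prompt` and `NoOpDebugLogger` would need to be imported):
+#
+#     import os
+#     client = Client(os.getenv("AA_TOKEN"))
+#     task = Explain(client)
+#     request = ExplanationRequest(
+#         prompt=Prompt.from_text("An apple a day keeps the doctor"),
+#         target=" away",
+#     )
+#     output = task.run(
+#         ExplainInput(request=request, model="luminous-base"), NoOpDebugLogger()
+#     )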
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/core/logger.html b/_modules/intelligence_layer/core/logger.html new file mode 100644 index 000000000..76f6370d0 --- /dev/null +++ b/_modules/intelligence_layer/core/logger.html @@ -0,0 +1,794 @@ + + + + + + + intelligence_layer.core.logger — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.core.logger

+from contextlib import AbstractContextManager
+from datetime import datetime
+from json import dumps
+from pathlib import Path
+from types import TracebackType
+from uuid import UUID, uuid4
+
+from pydantic import BaseModel, Field, RootModel, SerializeAsAny
+from rich.panel import Panel
+from rich.syntax import Syntax
+from rich.tree import Tree
+
+from typing_extensions import TypeAliasType, Self
+
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Mapping,
+    Optional,
+    Protocol,
+    Sequence,
+    Union,
+    runtime_checkable,
+)
+
+if TYPE_CHECKING:
+    PydanticSerializable = (
+        int
+        | float
+        | str
+        | Sequence["PydanticSerializable"]
+        | Mapping[str, "PydanticSerializable"]
+        | None
+        | bool
+        | BaseModel
+        | UUID
+    )
+else:
+    PydanticSerializable = TypeAliasType(
+        "PydanticSerializable",
+        int
+        | float
+        | str
+        | Sequence["PydanticSerializable"]
+        | Mapping[str, "PydanticSerializable"]
+        | None
+        | bool
+        | BaseModel
+        | UUID,
+    )
+
+
+
+[docs] +@runtime_checkable +class DebugLogger(Protocol): + """A protocol for instrumenting `Task`s with structured logging. + + A logger needs to provide a way to collect an individual log, which should be serializable, and + a way to generate nested loggers, so that sub-tasks can emit logs that are grouped together. + + Each `DebugLogger` is given a `name` to distinguish them from each other, and for nested logs. + + Implementations of how logs are collected and stored may differ. Refer to the individual + documentation of each implementation to see how to use the resulting logger. + """ + +
+[docs] + def log(self, message: str, value: PydanticSerializable) -> None: + """Record a log of relevant information as part of a step within a task. + + By default, the `Input` and `Output` of each `Task` are logged automatically, but you can + log anything else that seems relevant to understanding the output of a given task. + + Args: + message: A description of the value you are logging, such as the step in the task this + is related to. + value: The relevant data you want to log. Can be anything that is serializable by + Pydantic, which gives the loggers flexibility in how they store and emit the logs. + """ + ...
+ + +
+[docs] + def span(self, name: str) -> "Span": + """Generate a span from the current logging instance. + + Each logger implementation can decide on how it wants to represent this, but they should + all allow for representing logs of a child task within the scope of the current task. + + Args: + name: A descriptive name of what this span will contain logs about. + + Returns: + An instance of something that meets the protocol of Span. + """ + ...
+ + +
+[docs] + def task_span(self, task_name: str, input: PydanticSerializable) -> "TaskSpan": + """Generate a task-specific span from the current logging instance. + + Each logger implementation can decide on how it wants to represent this, but they should + all allow for representing logs of a span within the context of a parent span. + + Args: + task_name: The name of the task that is being logged + input: The input for the task that is being logged. + + Returns: + An instance of something that meets the protocol of `TaskSpan`. Most likely, it + will be an instance of the same logger type, but this depends on the actual + implementation. + """ + ...
+
+ + + +
+[docs] +@runtime_checkable +class Span(AbstractContextManager["Span"], DebugLogger, Protocol): + """A protocol for instrumenting logs nested within a span of time. Groups logs by some logical + step. + + The implementation should also be a Context Manager, to capture the span of duration of + execution. + + Implementations of how logs are collected and stored may differ. Refer to the individual + documentation of each implementation to see how to use the resulting logger. + """ + + ...
+ + + +
+[docs] +@runtime_checkable +class TaskSpan(AbstractContextManager["TaskSpan"], DebugLogger, Protocol): + """A protocol for instrumenting a `Task`'s input, output, and nested logs. + + Most likely, generating this task logger will capture the `Task`'s input, as well as the task + name. + + The implementation should also be a Context Manager, to capture the span of duration of + task execution. + + Implementations of how logs are collected and stored may differ. Refer to the individual + documentation of each implementation to see how to use the resulting logger. + """ + +
+[docs] + def record_output(self, output: PydanticSerializable) -> None: + """Record a `Task`'s output. Since a Context Manager can't provide this in the `__exit__` + method, output should be captured once it is generated. + + This should be handled automatically within the execution of the task. + + Args: + output: The output of the task that is being logged. + """ + ...
+
+ + + +
+[docs] +class NoOpDebugLogger: + """A no-op logger. Useful for cases like testing, where a logger is needed for a task but there + is no need to collect or inspect the actual logs. + + All calls to `log` won't actually do anything. + """ +
+[docs] + def log(self, message: str, value: PydanticSerializable) -> None: + """Record a log of relevant information as part of a step within a task. + + By default, the `Input` and `Output` of each `Task` are logged automatically, but you can + log anything else that seems relevant to understanding the output of a given task. + + Args: + message: A description of the value you are logging, such as the step in the task this + is related to. + value: The relevant data you want to log. Can be anything that is serializable by + Pydantic, which gives the loggers flexibility in how they store and emit the logs. + """ + pass
+ + +
+[docs] + def span(self, name: str) -> "NoOpTaskSpan": + """Generate a sub-logger from the current logging instance. + + Args: + name: A descriptive name of what this child logger will contain logs about. + + Returns: + A `NoOpTaskSpan` + """ + return NoOpTaskSpan()
+ + +
+[docs] + def task_span(self, task_name: str, input: PydanticSerializable) -> "NoOpTaskSpan": + """Generate a task-specific span from the current logging instance. + + + Args: + task_name: The name of the task that is being logged + input: The input for the task that is being logged. + + Returns: + A `NoOpTaskSpan` + """ + + return NoOpTaskSpan()
+
+ + + +
+[docs] +class NoOpTaskSpan(NoOpDebugLogger, AbstractContextManager["NoOpTaskSpan"]): +
+[docs] + def record_output(self, output: PydanticSerializable) -> None: + """Record a `Task`'s output. Since a Context Manager can't provide this in the `__exit__` + method, output should be captured once it is generated. + + This should be handled automatically within the execution of the task. + + Args: + output: The output of the task that is being logged. + """ + pass
+ + + def __exit__( + self, + exc_type: Optional[type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + pass
+ + + +
+[docs] +class JsonSerializer(RootModel[PydanticSerializable]): + root: SerializeAsAny[PydanticSerializable]
+ + + +def _render_log_value(value: PydanticSerializable, title: str) -> Panel: + value = value if isinstance(value, BaseModel) else JsonSerializer(root=value) + return Panel( + Syntax( + value.model_dump_json(indent=2, exclude_defaults=True), + "json", + word_wrap=True, + ), + title=title, + ) + + +
+[docs] +class LogEntry(BaseModel): + """An individual log entry, currently used to represent individual logs by the + `InMemoryDebugLogger`. + + Attributes: + message: A description of the value you are logging, such as the step in the task this + is related to. + value: The relevant data you want to log. Can be anything that is serializable by + Pydantic, which gives the loggers flexibility in how they store and emit the logs. + timestamp: The time that the log was emitted. + """ + + message: str + value: SerializeAsAny[PydanticSerializable] + timestamp: datetime = Field(default_factory=datetime.utcnow) + + def _rich_render_(self) -> Panel: + """Renders the debug log via classes in the `rich` package""" + return _render_log_value(self.value, self.message) + + def _ipython_display_(self) -> None: + """Default rendering for Jupyter notebooks""" + from rich import print + + print(self._rich_render_())
+ + + +
+[docs] +class InMemoryDebugLogger(BaseModel): + """Collects log entries in a nested structure, and keeps them in memory. + + If desired, the structure is serializable with Pydantic, so you can write out the JSON + representation to a file, or return via an API, or something similar. + + Attributes: + name: A descriptive name of what the logger contains log entries about. + logs: A sequential list of log entries and/or nested InMemoryDebugLoggers with their own + log entries. + """ + + name: str + logs: list[Union[LogEntry, "InMemorySpan", "InMemoryTaskSpan"]] = [] + +
+[docs] + def log(self, message: str, value: PydanticSerializable) -> None: + """Record a log of relevant information as part of a step within a task. + + By default, the `Input` and `Output` of each `Task` are logged automatically, but you can + log anything else that seems relevant to understanding the output of a given task. + + Args: + message: A description of the value you are logging, such as the step in the task this + is related to. + value: The relevant data you want to log. Can be anything that is serializable by + Pydantic, which gives the loggers flexibility in how they store and emit the logs. + """ + self.logs.append(LogEntry(message=message, value=value))
+ + +
+[docs] + def span(self, name: str) -> "InMemorySpan": + """Generate a sub-logger from the current logging instance. + + Args: + name: A descriptive name of what this child logger will contain logs about. + + Returns: + A nested `InMemorySpan` that is stored in a nested position as part of the parent + logger. + """ + child = InMemorySpan(name=name) + self.logs.append(child) + return child
+ + +
+[docs] + def task_span( + self, task_name: str, input: PydanticSerializable + ) -> "InMemoryTaskSpan": + """Generate a task-specific span from the current logging instance. + + + Args: + task_name: The name of the task that is being logged + input: The input for the task that is being logged. + + Returns: + A nested `InMemoryTaskSpan` that is stored in a nested position as part of the parent + logger + """ + + child = InMemoryTaskSpan(name=task_name, input=input) + self.logs.append(child) + return child
+ + + def _rich_render_(self) -> Tree: + """Renders the debug log via classes in the `rich` package""" + tree = Tree(label=self.name) + + for log in self.logs: + tree.add(log._rich_render_()) + + return tree + + def _ipython_display_(self) -> None: + """Default rendering for Jupyter notebooks""" + from rich import print + + print(self._rich_render_())
+ + + +
+[docs] +class InMemorySpan(AbstractContextManager["InMemorySpan"], InMemoryDebugLogger): + start_timestamp: Optional[datetime] = Field(default_factory=datetime.utcnow) + end_timestamp: Optional[datetime] = None + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: Optional[type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + self.end_timestamp = datetime.utcnow() + + def _rich_render_(self) -> Tree: + """Renders the debug log via classes in the `rich` package""" + tree = Tree(label=self.name) + + for log in self.logs: + tree.add(log._rich_render_()) + + return tree
+ + + +
+[docs] +class InMemoryTaskSpan(InMemorySpan): + input: SerializeAsAny[PydanticSerializable] + output: Optional[SerializeAsAny[PydanticSerializable]] = None + +
+[docs] + def record_output(self, output: PydanticSerializable) -> None: + """Record a `Task`'s output. Since a Context Manager can't provide this in the `__exit__` + method, output should be captured once it is generated. + + This should be handled automatically within the execution of the task. + + Args: + output: The output of the task that is being logged. + """ + self.output = output
+ + + def _rich_render_(self) -> Tree: + """Renders the debug log via classes in the `rich` package""" + tree = Tree(label=self.name) + + tree.add(_render_log_value(self.input, "Input")) + + for log in self.logs: + tree.add(log._rich_render_()) + + tree.add(_render_log_value(self.output, "Output")) + + return tree
+ + + +
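+# A usage sketch for in-memory logging (names are illustrative):
+#
+#     logger = InMemoryDebugLogger(name="my_task")
+#     with logger.task_span("summarize", input={"text": "..."}) as task_span:
+#         task_span.log("step", "chunked the input")
+#         task_span.record_output("a short summary")
+#     # The whole nested structure is a Pydantic model and serializes to JSON:
+#     print(logger.model_dump_json(indent=2))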
+[docs] +class StartTask(BaseModel): + """Represents the payload/entry of a log-line indicating that a `TaskSpan` was opened through `DebugLogger.task_span`. + + Attributes: + uuid: A unique id for the opened `TaskSpan`. + parent: The unique id of the parent element of the opened `TaskSpan`. + This could refer to either a surrounding `TaskSpan`, `Span` or the top-level `DebugLogger`. + name: The name of the task. + start: The timestamp when this `Task` was started (i.e. `run` was called). + input: The `Input` (i.e. parameter for `run`) the `Task` was started with. + """ + + uuid: UUID + parent: UUID + name: str + start: datetime + input: SerializeAsAny[Any]
+ + + +
+[docs] +class EndTask(BaseModel): + """Represents the payload/entry of a log-line that indicates that a `TaskSpan` ended (i.e. the context-manager exited). + + Attributes: + uuid: The uuid of the corresponding `StartTask`. + end: the timestamp when this `Task` completed (i.e. `run` returned). + output: the `Output` (i.e. return value of `run`) the `Task` returned. + """ + + uuid: UUID + end: datetime + output: SerializeAsAny[Any]
+ + + +
+[docs] +class StartSpan(BaseModel): + """Represents the payload/entry of a log-line indicating that a `Span` was opened through `DebugLogger.span`. + + Attributes: + uuid: A unique id for the opened `Span`. + parent: The unique id of the parent element of the opened `Span`. + This could refer to either a surrounding `TaskSpan`, `Span` or the top-level `DebugLogger`. + name: The name of the span. + start: The timestamp when this `Span` was started. + """ + + uuid: UUID + parent: UUID + name: str + start: datetime
+ + + +
+[docs] +class EndSpan(BaseModel): + """Represents the payload/entry of a log-line that indicates that a `Span` ended. + + Attributes: + uuid: The uuid of the corresponding `StartSpan`. + end: the timestamp when this `Span` completed. + """ + + uuid: UUID + end: datetime
+ + + +
+[docs] +class PlainEntry(BaseModel): + """Represents a plain log-entry created through `DebugLogger.log`. + + Attributes: + message: the message-parameter of `DebugLogger.log` + value: the value-parameter of `DebugLogger.log` + timestamp: the timestamp when `DebugLogger.log` was called. + parent: The unique id of the parent element of the log. + This could refer to either a surrounding `TaskSpan`, `Span` or the top-level `DebugLogger`. + """ + + message: str + value: SerializeAsAny[Any] + timestamp: datetime + parent: UUID
+ + + +
+[docs] +class LogLine(BaseModel): + """Represents a complete log-line. + + Attributes: + entry_type: The type of the entry. This is the class-name of one of the classes + representing a log-entry (e.g. "StartTask"). + entry: The actual entry. + + """ + + entry_type: str + entry: SerializeAsAny[Any]
+ + + +
+[docs] +class FileDebugLogger(DebugLogger): + """A `DebugLogger` that logs to a file. + + Each log-entry is represented by a JSON object. The logged information makes it possible + to reconstruct the hierarchical structure of the logs, i.e. each entry has a + _pointer_ to its parent element in the form of a parent attribute containing + the uuid of the parent. + + Args: + log_file_path: Denotes the file to log to. + + Attributes: + uuid: a uuid for the logger. If multiple `FileDebugLogger`s log to the same file + the child-elements for a logger can be identified by referring to this id as parent. + """ + + def __init__(self, log_file_path: Path) -> None: + self._log_file_path = log_file_path + self.uuid = uuid4() +
+[docs] + def log(self, message: str, value: PydanticSerializable) -> None: + self._log_entry( + PlainEntry( + message=message, + value=value, + timestamp=datetime.utcnow(), + parent=self.uuid, + ) + )
+ + + def _log_entry(self, entry: BaseModel) -> None: + with self._log_file_path.open("a") as f: + f.write( + LogLine(entry_type=type(entry).__name__, entry=entry).model_dump_json() + + "\n" + ) + +
+[docs] + def span(self, name: str) -> "FileSpan": + span = FileSpan(self._log_file_path, name) + self._log_entry( + StartSpan( + uuid=span.uuid, parent=self.uuid, name=name, start=datetime.utcnow() + ) + ) + return span
+ + +
+[docs] + def task_span(self, task_name: str, input: PydanticSerializable) -> "FileTaskSpan": + task = FileTaskSpan(self._log_file_path, task_name, input) + self._log_entry( + StartTask( + uuid=task.uuid, + parent=self.uuid, + name=task_name, + start=datetime.utcnow(), + input=input, + ) + ) + return task
+
+ + + +
+[docs] +class FileSpan(FileDebugLogger, AbstractContextManager["FileSpan"]): + """A `Span` created by `FileDebugLogger.span`.""" + + def __init__(self, log_file_path: Path, name: str) -> None: + super().__init__(log_file_path) + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: Optional[type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + self._log_entry(EndSpan(uuid=self.uuid, end=datetime.utcnow()))
+ + + +
+[docs] +class FileTaskSpan(FileSpan, AbstractContextManager["FileTaskSpan"]): + """A `TaskSpan` created by `FileDebugLogger.task_span`.""" + + output: Optional[PydanticSerializable] = None + + def __init__( + self, log_file_path: Path, task_name: str, input: PydanticSerializable + ) -> None: + super().__init__(log_file_path, task_name) + +
+[docs] + def record_output(self, output: PydanticSerializable) -> None: + self.output = output
+ + + def __exit__( + self, + exc_type: Optional[type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + self._log_entry( + EndTask(uuid=self.uuid, end=datetime.utcnow(), output=self.output) + )
+ +
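+# A sketch of reading a log file back (the logger writes one JSON object per
+# line; the file name is illustrative):
+#
+#     log_path = Path("run.log")
+#     logger = FileDebugLogger(log_path)
+#     logger.log("started", {"attempt": 1})
+#     for raw in log_path.read_text().splitlines():
+#         line = LogLine.model_validate_json(raw)
+#         print(line.entry_type, line.entry)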
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/core/prompt_template.html b/_modules/intelligence_layer/core/prompt_template.html new file mode 100644 index 000000000..28d8acbb0 --- /dev/null +++ b/_modules/intelligence_layer/core/prompt_template.html @@ -0,0 +1,552 @@ + + + + + + + intelligence_layer.core.prompt_template — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.core.prompt_template

+from collections import defaultdict
+from dataclasses import dataclass, replace
+from itertools import chain
+from re import finditer
+from sys import intern
+from typing import (
+    Any,
+    Dict,
+    Iterable,
+    List,
+    Mapping,
+    NewType,
+    Optional,
+    Sequence,
+    TextIO,
+    Tuple,
+    Union,
+)
+from uuid import UUID, uuid4
+
+from liquid import BoundTemplate, Context, Environment
+from liquid.tag import Tag
+from liquid.parse import get_parser, expect
+from liquid.token import TOKEN_TAG, TOKEN_EOF, TOKEN_EXPRESSION
+from liquid.ast import Node, BlockNode
+from liquid.expressions.common import parse_unchained_identifier
+from liquid.expressions.filtered.lex import tokenize
+from liquid.stream import TokenStream
+from liquid.expressions.stream import TokenStream as AstTokenStream
+from liquid.exceptions import LiquidTypeError
+from liquid.context import Namespace
+
+from aleph_alpha_client.prompt import Image, Prompt, PromptItem, Text, Tokens
+
+Placeholder = NewType("Placeholder", UUID)
+
+
+
+[docs] +@dataclass(frozen=True) +class TextCursor: + """Defines a position within a `Text` prompt item. + + Args: + item: the index of the prompt item within the `Prompt` + position: the character position in the text of the item. + + Example: + >>> Prompt.from_text("This is a text") + >>> TextCursor(item=0, position=5) + >>> # This denotes the "i" in "is" in the text-item of the `Prompt` above + """ + + item: int + position: int
+ + + +
+[docs] +@dataclass(frozen=True) +class PromptItemCursor: + """Defines a position within a non-`Text` prompt item. + + Args: + item: the index of the prompt item within the `Prompt` + """ + + item: int
+ + + +Cursor = Union[TextCursor, PromptItemCursor] + + +
+[docs] +@dataclass +class PromptRange: + """Defines a range within a `Prompt`.""" + + start: Cursor + end: Cursor
+ + + +
+[docs] +@dataclass(frozen=True) +class PromptWithMetadata: + """The `Prompt` along with some metadata generated when a `PromptTemplate` is turned into a `Prompt`. + + Args: + prompt: The actual `Prompt`. + ranges: A mapping of range name to a `Sequence` of corresponding `PromptRange`s. + """ + + prompt: Prompt + ranges: Mapping[str, Sequence[PromptRange]]
+ + + +PROMPT_RANGE_TAG = intern("promptrange") +PROMPT_RANGE_END_TAG = intern("endpromptrange") + + +
+[docs] +class PromptRangeTag(Tag): + """Defines the liquid tag for the promptrange.""" + + name = PROMPT_RANGE_TAG + end = PROMPT_RANGE_END_TAG + + def __init__(self, env: Environment): + super().__init__(env) + self.parser = get_parser(env) + +
+[docs] + def parse(self, stream: TokenStream) -> Node: + expect(stream, TOKEN_TAG, PROMPT_RANGE_TAG) + stream.next_token() + expect(stream, TOKEN_EXPRESSION) + + name = str( + parse_unchained_identifier(AstTokenStream(tokenize(stream.current.value))) + ) + stream.next_token() + block = self.parser.parse_block(stream, (PROMPT_RANGE_END_TAG, TOKEN_EOF)) + expect(stream, TOKEN_TAG, value=PROMPT_RANGE_END_TAG) + return PromptRangeNode(block, name)
+
+ + + +
+[docs] +class PromptRangeContext(Context): + """A liquid `Context` with some additional state used by the `PromptRangeNode`.""" + + def __init__( + self, + env: Environment, + globals: Optional[Namespace] = None, + disabled_tags: Optional[List[str]] = None, + copy_depth: int = 0, + parent_context: Optional[Context] = None, + loop_iteration_carry: int = 1, + local_namespace_size_carry: int = 0, + template: Optional[BoundTemplate] = None, + ): + super().__init__( + env, + globals, + disabled_tags, + copy_depth, + parent_context, + loop_iteration_carry, + local_namespace_size_carry, + template, + ) + self._placeholder_range_names: dict[Placeholder, str] = {} + +
+[docs] + def add_placeholder_range(self, placeholder: Placeholder, name: str) -> None: + self._placeholder_range_names[placeholder] = name
+ + +
+[docs] + def placeholder_range_names(self) -> Mapping[Placeholder, str]: + return self._placeholder_range_names
+
+ + + +
+[docs] +class PromptRangeNode(Node): + """A liquid `Node` representing a promptrange.""" + + def __init__(self, inner: BlockNode, name: str) -> None: + super().__init__() + self.inner = inner + self.name = name + self.placeholder = Placeholder(uuid4()) + +
+[docs] + def render_to_output(self, context: Context, buffer: TextIO) -> Optional[bool]: + if not isinstance(context, PromptRangeContext): + raise LiquidTypeError( + f"Context not of expected type: {PromptRangeContext} (is: {type(context)})" + ) + context.add_placeholder_range(self.placeholder, self.name) + buffer.write(str(self.placeholder)) + self.inner.render(context, buffer) + buffer.write(str(self.placeholder)) + return True
+
+ + + +
+[docs] +class PromptTemplate: + """Allows building a `Prompt` using the `liquid template language <https://shopify.github.io/liquid/>`_. + + To add non-text prompt items, you first have to save them to the template with the `template.placeholder()` function. + To embed the items in the template, pass the placeholder in the place(s) where you would like the items. + + Example: + >>> image = Image.from_file(Path("path-to-image")) + >>> template = PromptTemplate( + '''{%- for name in names -%} + Hello {{name}}! + {% endfor -%} + {{ image }} + ''') + >>> placeholder = template.placeholder(image) + >>> names = ["World", "Rutger"] + >>> prompt = template.to_prompt(names=names, image=placeholder) + >>> request = CompletionRequest(prompt=prompt) + """ + + def __init__(self, template_str: str) -> None: + """Initialize with the liquid template string. + + The template supports the custom liquid tag `promptrange`. This can be used to determine ranges + within the `Prompt` primarily for downstream explainability tasks. + + Args: + template_str: the liquid template string + + Example: + >>> template = PromptTemplate( + '''Answer the following question given the input. + + Input: {% promptrange input %}{{text}}{% endpromptrange %} + Question: {% promptrange question %}{{question}}{% endpromptrange %} + Answer:''') + >>> prompt_data = template.to_prompt_with_metadata(text="Some text...", question="A question ...") + >>> input_range = prompt_data.ranges.get("input") + """ + env = Environment() + env.add_tag(PromptRangeTag) + self.template = env.from_string(template_str) + self.prompt_item_placeholders: Dict[Placeholder, Union[Image, Tokens]] = {}
+[docs] + def placeholder(self, value: Union[Image, Tokens]) -> Placeholder: + """Saves a non-text prompt item to the template and returns a placeholder. + + The placeholder is used to embed the prompt item in the template. + """ + id = Placeholder(uuid4()) + self.prompt_item_placeholders[id] = value + return id
+ + + def _join_character( + self, first_item: Union[Text, Image, Tokens, None], second_item: Text + ) -> str: + if ( + isinstance(first_item, Text) + and not first_item.text[-1].isspace() + and not second_item.text[0].isspace() + ): + return " " + else: + return "" + +
+[docs]
+    def embed_prompt(self, prompt: Prompt) -> str:
+        """Embeds a prompt in a prompt template.
+
+        Adds whitespace between text items if there is no whitespace between them.
+        In case of non-text prompt items, this embeds them into the end result.
+
+        Example:
+            >>> user_prompt = Prompt(
+                    [
+                        Tokens.from_token_ids([1, 2, 3]),
+                        Text.from_text("cool"),
+                        Image.from_file(Path("path-to-image")),
+                    ]
+                )
+            >>> template = PromptTemplate("Question: {{user_prompt}}\\n Answer: ")
+            >>> prompt = template.to_prompt(user_prompt=template.embed_prompt(user_prompt))
+
+        Args:
+            prompt: The prompt to embed in the template.
+        """
+        prompt_text = ""
+        last_item = None
+        for item in prompt.items:
+            if isinstance(item, Text):
+                if len(item.text) == 0:
+                    continue
+                prompt_text = str.join(
+                    self._join_character(last_item, item), [prompt_text, item.text]
+                )
+            else:
+                prompt_text = str.join("", [prompt_text, str(self.placeholder(item))])
+            last_item = item
+        return prompt_text
+ + +
+[docs] + def to_prompt_with_metadata(self, **kwargs: Any) -> PromptWithMetadata: + """Creates a `Prompt` along with metadata from the template string and the given parameters. + + Currently the only metadata returned is information about ranges that are marked in the template. + Provided parameters are passed to `liquid.Template.render`. + """ + context = PromptRangeContext( + self.template.env, + globals=self.template.make_globals(kwargs), + template=self.template, + ) + buffer = self.template._get_buffer() + self.template.render_with_context(context, buffer, **kwargs) + liquid_prompt = buffer.getvalue() + placeholder_indices = self._compute_indices( + chain( + self.prompt_item_placeholders.keys(), + context.placeholder_range_names().keys(), + ), + liquid_prompt, + ) + modalities, placeholder_ranges = self._compute_modalities_and_ranges( + placeholder_indices, context.placeholder_range_names(), liquid_prompt + ) + + result = PromptWithMetadata(Prompt(modalities), placeholder_ranges) + self._reset_placeholder_state() + return result
+ + +
+[docs] + def to_prompt(self, **kwargs: Any) -> Prompt: + """Creates a `Prompt` from the template string and the given parameters. + + Provided parameters are passed to `liquid.Template.render`. + """ + return self.to_prompt_with_metadata(**kwargs).prompt
+ + + def _reset_placeholder_state(self) -> None: + self.prompt_item_placeholders = {} + + def _compute_indices( + self, placeholders: Iterable[Placeholder], template: str + ) -> Iterable[Tuple[int, int]]: + pattern = "|".join(str(placeholder) for placeholder in placeholders) + return ( + ( + (match.start(), match.end()) + for match in finditer(f"({pattern})", template) + ) + if pattern + else [] + ) + + def _compute_modalities_and_ranges( + self, + placeholder_indices: Iterable[Tuple[int, int]], + placeholder_range_names: Mapping[Placeholder, str], + template: str, + ) -> Tuple[Sequence[PromptItem], Mapping[str, Sequence[PromptRange]]]: + placeholder_ranges: Dict[Placeholder, List[PromptRange]] = defaultdict(list) + modalities = list( + self._modalities_from(placeholder_indices, placeholder_ranges, template) + ) + self._replace_start_cursors_of_non_text_items(modalities, placeholder_ranges) + return modalities, { + placeholder_range_names[placeholder]: ranges + for placeholder, ranges in placeholder_ranges.items() + if placeholder_range_names.get(placeholder) + } + + @staticmethod + def _replace_start_cursors_of_non_text_items( + modalities: Sequence[PromptItem], + placeholder_ranges: Dict[Placeholder, List[PromptRange]], + ) -> None: + for prompt_ranges in placeholder_ranges.values(): + for index, range in enumerate(prompt_ranges): + if not isinstance(modalities[range.start.item], Text): + prompt_ranges[index] = replace( + range, start=PromptItemCursor(range.start.item) + ) + + def _modalities_from( + self, + placeholder_indices: Iterable[Tuple[int, int]], + placeholder_ranges: dict[Placeholder, List[PromptRange]], + template: str, + ) -> Iterable[PromptItem]: + last_to = 0 + accumulated_text = "" + item_cnt = 0 + range_starts: Dict[Placeholder, TextCursor] = {} + + def new_prompt_item(item: PromptItem) -> PromptItem: + nonlocal item_cnt, accumulated_text + item_cnt += 1 + accumulated_text = "" + return item + + def initial_start_text_cursor() -> TextCursor: + return TextCursor(item=item_cnt, position=len(accumulated_text)) + + def end_cursor() -> Cursor: + return ( + TextCursor(item=item_cnt, position=len(accumulated_text)) + if accumulated_text + else PromptItemCursor(item_cnt - 1) + ) + + def valid_range_for( + placeholder: Placeholder, end: Cursor + ) -> Iterable[PromptRange]: + if end.item >= range_starts[placeholder].item: + yield PromptRange(start=range_starts[placeholder], end=end) + del range_starts[placeholder] + + for placeholder_from, placeholder_to in placeholder_indices: + placeholder = Placeholder(UUID(template[placeholder_from:placeholder_to])) + accumulated_text += template[last_to:placeholder_from] + placeholder_prompt_item = self.prompt_item_placeholders.get(placeholder) + if placeholder_prompt_item: + if accumulated_text: + yield new_prompt_item(Text.from_text(accumulated_text)) + + yield new_prompt_item(placeholder_prompt_item) + else: + if range_starts.get(placeholder): + placeholder_ranges[placeholder].extend( + valid_range_for(placeholder, end_cursor()) + ) + else: + range_starts[placeholder] = initial_start_text_cursor() + last_to = placeholder_to + if last_to < len(template) or accumulated_text: + yield Text.from_text(accumulated_text + template[last_to:])
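+
+# Illustrative sketch (not part of the module): how placeholders and
+# `promptrange` tags interact; the names below are hypothetical.
+#
+#   template = PromptTemplate(
+#       "{% promptrange question %}{{question}}{% endpromptrange %} {{image}}"
+#   )
+#   placeholder = template.placeholder(Image.from_file(Path("img.png")))
+#   prompt_with_metadata = template.to_prompt_with_metadata(
+#       question="What is depicted here?", image=placeholder
+#   )
+#   # prompt_with_metadata.ranges["question"] holds the cursor pair that
+#   # brackets the rendered question text inside the final Prompt.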
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/core/task.html b/_modules/intelligence_layer/core/task.html new file mode 100644 index 000000000..40a68eca2 --- /dev/null +++ b/_modules/intelligence_layer/core/task.html @@ -0,0 +1,255 @@ + + + + + + + intelligence_layer.core.task — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.core.task

+from abc import ABC, abstractmethod
+from concurrent.futures import ThreadPoolExecutor
+import functools
+from itertools import islice
+from typing import (
+    Any,
+    Generic,
+    Iterable,
+    NewType,
+    Sequence,
+    TypeVar,
+    Callable,
+)
+
+from pydantic import (
+    BaseModel,
+)
+
+from intelligence_layer.core.logger import DebugLogger, PydanticSerializable
+
+
+Chunk = NewType("Chunk", str)
+"""Segment of a larger text.
+
+This type signals that the string fits within the context size of the model where it is used.
+
+LLMs can't process documents larger than their context size.
+To handle this, documents have to be split up into smaller segments that fit within their context size.
+These smaller segments are referred to as chunks.
+"""
+
+LogProb = NewType("LogProb", float)
+Probability = NewType("Probability", float)
+
+
+
+[docs]
+class Token(BaseModel):
+    """A token class containing its id and the raw token.
+
+    This is used instead of the Aleph Alpha client Token class since this one is serializable,
+    while the one from the client is not.
+    """
+
+    token: str
+    token_id: int
+ + + +Input = TypeVar("Input", bound=PydanticSerializable) +"""Interface to be passed to the task with all data needed to run the process. +Ideally, these are specified in terms related to the use-case, rather than lower-level +configuration options.""" +Output = TypeVar("Output", bound=PydanticSerializable) +"""Interface of the output returned by the task.""" + + +MAX_CONCURRENCY = 20 +global_executor = ThreadPoolExecutor(max_workers=MAX_CONCURRENCY) + + +
+[docs]
+class Task(ABC, Generic[Input, Output]):
+    """Base task interface. This may consist of several sub-tasks to accomplish the given task.
+
+    Generics:
+        Input: Interface to be passed to the task with all data needed to run the process.
+            Ideally, these are specified in terms related to the use-case, rather than lower-level
+            configuration options.
+        Output: Interface of the output returned by the task.
+    """
+
+    def __init_subclass__(cls, **kwargs: Any) -> None:
+        """Decorates the run method to automatically log input and output for the task."""
+        super().__init_subclass__(**kwargs)
+
+        def log_run_input_output(
+            func: Callable[["Task[Input, Output]", Input, DebugLogger], Output]
+        ) -> Callable[["Task[Input, Output]", Input, DebugLogger], Output]:
+            @functools.wraps(func)
+            def inner(
+                self: "Task[Input, Output]",
+                input: Input,
+                logger: DebugLogger,
+            ) -> Output:
+                with logger.task_span(type(self).__name__, input) as task_span:
+                    output = func(self, input, task_span)
+                    task_span.record_output(output)
+                    return output
+
+            return inner
+
+        cls.run = log_run_input_output(cls.run)  # type: ignore
+
+[docs] + @abstractmethod + def run(self, input: Input, logger: DebugLogger) -> Output: + """Executes the process for this use-case.""" + ...
+ + +
+[docs]
+    def run_concurrently(
+        self,
+        inputs: Iterable[Input],
+        debug_logger: DebugLogger,
+        concurrency_limit: int = MAX_CONCURRENCY,
+    ) -> Sequence[Output]:
+        """Executes multiple processes of this task concurrently.
+
+        Each provided input is potentially executed concurrently to the others. There is a global limit
+        on the number of concurrently executed tasks that is shared by all tasks of all types.
+
+        Args:
+            inputs: The inputs that are potentially processed concurrently.
+            debug_logger: The logger passed on to the `run` method when executing a task.
+            concurrency_limit: An optional additional limit for the number of concurrently executed tasks for
+                this method call. This can be used to prevent queue-full or similar errors of downstream APIs
+                when the global concurrency limit is too high for a certain task.
+        Returns:
+            The `Output`s generated by calling `run` for each given `Input`. The order of `Output`s
+            corresponds to the order of the `Input`s.
+        """
+
+        with debug_logger.span(f"Concurrent {type(self).__name__} tasks") as span:
+
+            def run_batch(inputs: Iterable[Input]) -> Iterable[Output]:
+                return global_executor.map(
+                    lambda input: self.run(input, span),
+                    inputs,
+                )
+
+            return [
+                output
+                for batch in batched(inputs, concurrency_limit)
+                for output in run_batch(batch)
+            ]
+
+ + + +T = TypeVar("T") + + +
+[docs]
+def batched(iterable: Iterable[T], n: int) -> Iterable[Iterable[T]]:
+    """Yields batches of size at most `n` from an iterable."""
+    if n < 1:
+        raise ValueError("n must be at least one")
+    it = iter(iterable)
+    while batch := tuple(islice(it, n)):
+        yield batch
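+
+# Illustrative sketch (not part of the module): a minimal Task subclass and the
+# batching behaviour used by `run_concurrently`; names are hypothetical.
+#
+#   class Reverse(Task[str, str]):
+#       def run(self, input: str, logger: DebugLogger) -> str:
+#           return input[::-1]
+#
+#   list(batched("abcde", 2))  # -> [("a", "b"), ("c", "d"), ("e",)]
+#
+#   # Reverse().run_concurrently(["ab", "cd"], logger) submits at most
+#   # `concurrency_limit` inputs at a time to the shared executor and returns
+#   # outputs in input order.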
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/core/text_highlight.html b/_modules/intelligence_layer/core/text_highlight.html new file mode 100644 index 000000000..6122dc792 --- /dev/null +++ b/_modules/intelligence_layer/core/text_highlight.html @@ -0,0 +1,419 @@ + + + + + + + intelligence_layer.core.text_highlight — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.core.text_highlight

+from typing import Iterable, Sequence
+
+from aleph_alpha_client import (
+    Client,
+    ExplanationRequest,
+    ExplanationResponse,
+    PromptGranularity,
+    Text,
+    TextPromptItemExplanation,
+    Prompt,
+)
+from aleph_alpha_client.explanation import TextScoreWithRaw
+from pydantic import BaseModel
+
+from intelligence_layer.core.explain import Explain, ExplainInput
+from intelligence_layer.core.prompt_template import (
+    Cursor,
+    PromptRange,
+    PromptWithMetadata,
+    TextCursor,
+)
+from intelligence_layer.core.task import Task
+from intelligence_layer.core.logger import DebugLogger
+
+
+
+[docs]
+class TextHighlightInput(BaseModel):
+    """The input for a text highlighting task.
+
+    Attributes:
+        prompt_with_metadata: From client's PromptTemplate. Includes both the actual 'Prompt' as well as text range information.
+            Supports liquid-template-language-style {% promptrange range_name %}/{% endpromptrange %} for ranges.
+        target: The target that should be explained. Expected to follow the prompt.
+        model: A valid Aleph Alpha model name.
+        focus_ranges: The ranges contained in `prompt_with_metadata` the returned highlights stem from. That means that each returned
+            highlight overlaps by at least one character with one of the ranges listed here.
+            If this set is empty, highlights of the entire prompt are returned.
+    """
+
+    prompt_with_metadata: PromptWithMetadata
+    target: str
+    model: str
+    focus_ranges: frozenset[str] = frozenset()
+ + + +
+[docs] +class ScoredTextHighlight(BaseModel): + """A substring of the input prompt scored for relevance with regard to the output. + + Attributes: + text: The highlighted part of the prompt. + score: The z-score of the highlight. Depicts relevance of this highlight in relation to all other highlights. Can be positive (support) or negative (contradiction). + """ + + text: str + score: float
+ + + +
+[docs] +class TextHighlightOutput(BaseModel): + """The output of a text highlighting task. + + Attributes: + highlights: A sequence of 'ScoredTextHighlight's. + """ + + highlights: Sequence[ScoredTextHighlight]
+ + + +
+[docs]
+class TextHighlight(Task[TextHighlightInput, TextHighlightOutput]):
+    """Generates text highlights given a prompt and completion.
+
+    For a given prompt and target (completion), extracts the parts of the prompt responsible for generation.
+    A range can be provided in the input 'PromptWithMetadata' via use of the liquid language (see the example).
+    In this case, the highlights will only refer to text within this range.
+
+    Args:
+        client: Aleph Alpha client instance for running model related API calls.
+        granularity: The granularity at which the prompt is explained. Defaults to sentence level.
+
+    Example:
+        >>> client = Client(os.getenv("AA_TOKEN"))
+        >>> text_highlight = TextHighlight(client=client)
+        >>> prompt_template_str = "{% promptrange r1 %}Question: What is 2 + 2?{% endpromptrange %}\nAnswer:"
+        >>> template = PromptTemplate(prompt_template_str)
+        >>> prompt_with_metadata = template.to_prompt_with_metadata()
+        >>> completion = " 4."
+        >>> model = "luminous-base"
+        >>> input = TextHighlightInput(
+        >>>     prompt_with_metadata=prompt_with_metadata, target=completion, model=model
+        >>> )
+        >>> output = text_highlight.run(input, InMemoryLogger(name="Highlight"))
+    """
+
+    _client: Client
+
+    def __init__(
+        self,
+        client: Client,
+        granularity: PromptGranularity = PromptGranularity.Sentence,
+    ) -> None:
+        super().__init__()
+        self._client = client
+        self._explain_task = Explain(client)
+        self._granularity = granularity
+
+[docs] + def run( + self, input: TextHighlightInput, logger: DebugLogger + ) -> TextHighlightOutput: + self._raise_on_invalid_focus_range(input) + explanation = self._explain( + prompt=input.prompt_with_metadata.prompt, + target=input.target, + model=input.model, + logger=logger, + ) + prompt_ranges = self._flatten_prompt_ranges( + range + for name, range in input.prompt_with_metadata.ranges.items() + if name in input.focus_ranges + ) + text_prompt_item_explanations_and_indices = ( + self._extract_text_prompt_item_explanations_and_item_index( + input.prompt_with_metadata.prompt, explanation + ) + ) + highlights = self._to_highlights( + prompt_ranges, + text_prompt_item_explanations_and_indices, + logger, + ) + return TextHighlightOutput(highlights=highlights)
+ + + def _raise_on_invalid_focus_range(self, input: TextHighlightInput) -> None: + unknown_focus_ranges = input.focus_ranges - set( + input.prompt_with_metadata.ranges.keys() + ) + if unknown_focus_ranges: + raise ValueError(f"Unknown focus ranges: {', '.join(unknown_focus_ranges)}") + + def _explain( + self, prompt: Prompt, target: str, model: str, logger: DebugLogger + ) -> ExplanationResponse: + request = ExplanationRequest( + prompt, + target, + prompt_granularity=self._granularity, + ) + output = self._explain_task.run( + ExplainInput(request=request, model=model), logger + ) + return output.response + + def _flatten_prompt_ranges( + self, prompt_ranges: Iterable[Sequence[PromptRange]] + ) -> Sequence[PromptRange]: + return [pr for prs in prompt_ranges for pr in prs] + + def _extract_text_prompt_item_explanations_and_item_index( + self, + prompt: Prompt, + explanation_response: ExplanationResponse, + ) -> Sequence[tuple[TextPromptItemExplanation, int]]: + prompt_texts_and_indices = [ + (prompt_text, idx) + for idx, prompt_text in enumerate(prompt.items) + if isinstance(prompt_text, Text) + ] + text_prompt_item_explanations = [ + explanation + for explanation in explanation_response.explanations[0].items + if isinstance(explanation, TextPromptItemExplanation) + ] # explanations[0], because one explanation for each target + assert len(prompt_texts_and_indices) == len(text_prompt_item_explanations) + return [ + ( + text_prompt_item_explanation.with_text(prompt_text_and_index[0]), + prompt_text_and_index[1], + ) + for prompt_text_and_index, text_prompt_item_explanation in zip( + prompt_texts_and_indices, text_prompt_item_explanations + ) + ] + + def _to_highlights( + self, + prompt_ranges: Sequence[PromptRange], + text_prompt_item_explanations_and_indices: Sequence[ + tuple[TextPromptItemExplanation, int] + ], + logger: DebugLogger, + ) -> Sequence[ScoredTextHighlight]: + overlapping_and_flat = [ + text_score + for text_prompt_item_explanation, explanation_idx in text_prompt_item_explanations_and_indices + for text_score in text_prompt_item_explanation.scores + if isinstance(text_score, TextScoreWithRaw) + and self._is_relevant_explanation( + explanation_idx, text_score, prompt_ranges + ) + ] + logger.log( + "Raw explanation scores", + [ + { + "text": text_score.text, + "score": text_score.score, + } + for text_score in overlapping_and_flat + ], + ) + if not overlapping_and_flat: + return [] + z_scores = self._z_scores([s.score for s in overlapping_and_flat]) + scored_highlights = [ + ScoredTextHighlight(text=text_score.text, score=z_score) + for text_score, z_score in zip(overlapping_and_flat, z_scores) + ] + return self._filter_highlights(scored_highlights) + + def _is_relevant_explanation( + self, + explanation_idx: int, + text_score: TextScoreWithRaw, + prompt_ranges: Sequence[PromptRange], + ) -> bool: + return ( + any( + self._prompt_range_overlaps_with_text_score( + prompt_range, text_score, explanation_idx + ) + for prompt_range in prompt_ranges + ) + or not prompt_ranges + ) + + @classmethod + def _prompt_range_overlaps_with_text_score( + cls, + prompt_range: PromptRange, + text_score: TextScoreWithRaw, + explanation_item_idx: int, + ) -> bool: + return ( + cls._is_within_prompt_range( + prompt_range, + explanation_item_idx, + text_score.start, + ) + or cls._is_within_prompt_range( + prompt_range, + explanation_item_idx, + text_score.start + text_score.length - 1, + ) + or cls._is_within_text_score( + text_score, explanation_item_idx, prompt_range.start + ) + ) + + @staticmethod 
+ def _is_within_text_score( + text_score: TextScoreWithRaw, + text_score_item: int, + prompt_range_cursor: Cursor, + ) -> bool: + if text_score_item != prompt_range_cursor.item: + return False + assert isinstance(prompt_range_cursor, TextCursor) + return ( + text_score.start + <= prompt_range_cursor.position + <= text_score.start + text_score.length - 1 + ) + + @staticmethod + def _is_within_prompt_range( + prompt_range: PromptRange, + item_check: int, + pos_check: int, + ) -> bool: + if item_check < prompt_range.start.item or item_check > prompt_range.end.item: + return False + if item_check == prompt_range.start.item: + # must be a text cursor, because has same index as TextScoreWithRaw + assert isinstance(prompt_range.start, TextCursor) + if pos_check < prompt_range.start.position: + return False + if item_check == prompt_range.end.item: + assert isinstance(prompt_range.end, TextCursor) # see above + if pos_check > prompt_range.end.position: + return False + return True + + @staticmethod + def _z_scores(data: Sequence[float]) -> Sequence[float]: + mean = 0 # assuming a mean of 0 (population mean), therefore also assuming n instead of n-1 (population df) + stdev = ( + (sum((x - mean) ** 2 for x in data) / len(data)) ** 0.5 + if len(data) > 1 + else 0 + ) + return [((x - mean) / stdev if stdev > 0 else 0) for x in data] + + def _filter_highlights( + self, + scored_highlights: Sequence[ScoredTextHighlight], + z_score_limit: float = 1.0, + ) -> Sequence[ScoredTextHighlight]: + return [h for h in scored_highlights if abs(h.score) >= z_score_limit]
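+
+# Worked example (illustrative) of the z-score normalization above, which
+# assumes a population mean of 0:
+#
+#   scores = [2.0, -2.0, 0.0]
+#   # stdev = sqrt((4 + 4 + 0) / 3) ≈ 1.633
+#   # z-scores ≈ [1.22, -1.22, 0.0]
+#   # With the default z_score_limit of 1.0, _filter_highlights keeps only the
+#   # first two highlights (one supporting, one contradicting).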
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/use_cases/classify/classify.html b/_modules/intelligence_layer/use_cases/classify/classify.html new file mode 100644 index 000000000..016826b17 --- /dev/null +++ b/_modules/intelligence_layer/use_cases/classify/classify.html @@ -0,0 +1,228 @@ + + + + + + + intelligence_layer.use_cases.classify.classify — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.use_cases.classify.classify

+from typing import (
+    Mapping,
+    Sequence,
+)
+
+from pydantic import BaseModel
+from intelligence_layer.core.evaluator import Evaluator
+from intelligence_layer.core.logger import DebugLogger
+from intelligence_layer.core.task import Chunk, Probability, Task
+
+
+
+[docs]
+class ClassifyInput(BaseModel):
+    """Input for a classification task.
+
+    Attributes:
+        chunk: The text to be classified.
+        labels: Possible labels the model will choose from.
+    """
+
+    chunk: Chunk
+    labels: frozenset[str]
+ + + +
+[docs] +class ClassifyOutput(BaseModel): + """Output for a single label classification task. + + Attributes: + scores: Mapping of the provided label (key) to corresponding score (value). + The score represents how sure the model is that this is the correct label. + This will be a value between 0 and 1. + The sum of all probabilities will be 1. + """ + + scores: Mapping[str, Probability]
+ + + +
+[docs] +class Classify(Task[ClassifyInput, ClassifyOutput]): + """Placeholder class for any classifier implementation.""" + + pass
+ + + +
+[docs] +class ClassifyEvaluation(BaseModel): + """The evaluation of a single label classification run. + + Attributes: + correct: Was the highest scoring class from the output in the set of "correct classes" + output: The actual output from the task run + """ + + correct: bool + output: ClassifyOutput
+ + + +
+[docs]
+class AggregatedClassifyEvaluation(BaseModel):
+    """The aggregated evaluation of a single label classify implementation against a dataset.
+
+    Attributes:
+        percentage_correct: Fraction of correctly classified examples (between 0 and 1).
+        evaluations: The actual evaluations.
+    """
+
+    percentage_correct: float
+    evaluations: Sequence[ClassifyEvaluation]
+ + + +
+[docs] +class ClassifyEvaluator( + Evaluator[ + ClassifyInput, + Sequence[str], + ClassifyEvaluation, + AggregatedClassifyEvaluation, + ] +): + def __init__(self, task: Classify): + self.task = task + +
+[docs] + def evaluate( + self, + input: ClassifyInput, + logger: DebugLogger, + expected_output: Sequence[str], + ) -> ClassifyEvaluation: + output = self.task.run(input, logger) + sorted_classes = sorted( + output.scores.items(), key=lambda item: item[1], reverse=True + ) + if sorted_classes[0][0] in expected_output: + correct = True + else: + correct = False + return ClassifyEvaluation(correct=correct, output=output)
+ + +
+[docs]
+    def aggregate(
+        self, evaluations: Sequence[ClassifyEvaluation]
+    ) -> AggregatedClassifyEvaluation:
+        if evaluations:
+            correct_answers = sum(
+                1 for evaluation in evaluations if evaluation.correct
+            ) / len(evaluations)
+        else:
+            correct_answers = 0
+        return AggregatedClassifyEvaluation(
+            percentage_correct=correct_answers, evaluations=evaluations
+        )
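+
+# Illustrative usage sketch (not part of the module); `task` and `logger` are
+# assumed to exist:
+#
+#   evaluator = ClassifyEvaluator(task)
+#   evaluation = evaluator.evaluate(
+#       ClassifyInput(chunk=Chunk("This is great."), labels=frozenset({"positive", "negative"})),
+#       logger,
+#       expected_output=["positive"],
+#   )
+#   # evaluation.correct is True iff "positive" received the highest score.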
+
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/use_cases/classify/embedding_based_classify.html b/_modules/intelligence_layer/use_cases/classify/embedding_based_classify.html new file mode 100644 index 000000000..11db6034b --- /dev/null +++ b/_modules/intelligence_layer/use_cases/classify/embedding_based_classify.html @@ -0,0 +1,280 @@ + + + + + + + intelligence_layer.use_cases.classify.embedding_based_classify — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.use_cases.classify.embedding_based_classify

+from enum import Enum
+import statistics
+from typing import Sequence
+from aleph_alpha_client import Client
+
+from pydantic import BaseModel
+from qdrant_client.http.models import models
+
+from intelligence_layer.connectors.retrievers.base_retriever import Document
+from intelligence_layer.connectors.retrievers.in_memory_retriever import (
+    InMemoryRetriever,
+    RetrieverType,
+)
+from intelligence_layer.core.logger import DebugLogger
+from intelligence_layer.core.task import Chunk, Probability
+from intelligence_layer.use_cases.classify.classify import (
+    Classify,
+    ClassifyInput,
+    ClassifyOutput,
+)
+from intelligence_layer.use_cases.search.filter_search import (
+    FilterSearch,
+    FilterSearchInput,
+)
+from intelligence_layer.use_cases.search.search import SearchOutput
+
+
+
+[docs]
+class LabelWithExamples(BaseModel):
+    """Defines a label and the list of examples making it up.
+
+    Attributes:
+        name: Name of the label.
+        examples: The texts exemplifying the label. Should be similar in structure
+            and semantics to the texts to be classified at inference time.
+    """
+
+    name: str
+    examples: Sequence[str]
+ + + +
+[docs]
+class EmbeddingBasedClassifyScoring(Enum):
+    """Specify the type of scoring to use.
+
+    Attributes:
+        MAX: Takes the score of the top match, i.e., the max.
+        MEAN_TOP_5: Takes the mean of the top 5 matches.
+    """
+
+    MAX = 1
+    MEAN_TOP_5 = 5
+ + + +
+[docs]
+class EmbeddingBasedClassify(Classify):
+    """Task that classifies a given input text based on examples.
+
+    The input contains a complete set of all possible labels. The output will return a score
+    for each possible label. Scores will be between 0 and 1 but do not have to add up to one.
+    On initialization, provide a list of examples for each label.
+
+    This methodology works best with a larger number of examples per label and with labels
+    that consist of easily definable semantic clusters.
+
+    Args:
+        labels_with_examples: Examples to be used for classification.
+        client: Aleph Alpha client instance for running model related API calls.
+        scoring: Configure how to calculate the final score.
+
+    Attributes:
+        METADATA_LABEL_NAME: The metadata field for label name for the `InMemoryRetriever`
+            instance.
+
+    Example:
+        >>> labels_with_examples = [
+        >>>     LabelWithExamples(
+        >>>         name="positive",
+        >>>         examples=[
+        >>>             "I really like this.",
+        >>>         ],
+        >>>     ),
+        >>>     LabelWithExamples(
+        >>>         name="negative",
+        >>>         examples=[
+        >>>             "I really dislike this.",
+        >>>         ],
+        >>>     ),
+        >>> ]
+        >>> client = Client(token="AA_TOKEN")
+        >>> task = EmbeddingBasedClassify(labels_with_examples, client)
+        >>> input = ClassifyInput(
+        >>>     chunk="This is a happy text.",
+        >>>     labels={"positive", "negative"}
+        >>> )
+        >>> logger = InMemoryLogger(name="Classify")
+        >>> output = task.run(input, logger)
+        >>> print(output.scores["positive"])
+        0.7
+    """
+
+    METADATA_LABEL_NAME = "label"
+
+    def __init__(
+        self,
+        labels_with_examples: Sequence[LabelWithExamples],
+        client: Client,
+        scoring: EmbeddingBasedClassifyScoring = EmbeddingBasedClassifyScoring.MEAN_TOP_5,
+    ) -> None:
+        super().__init__()
+        self._labels_with_examples = labels_with_examples
+        documents = self._labels_with_examples_to_documents(labels_with_examples)
+        self._scoring = scoring
+        retriever = InMemoryRetriever(
+            client,
+            documents=documents,
+            k=scoring.value,
+            retriever_type=RetrieverType.SYMMETRIC,
+        )
+        self._filter_search = FilterSearch(retriever)
+
+[docs] + def run(self, input: ClassifyInput, logger: DebugLogger) -> ClassifyOutput: + available_labels = set( + class_with_examples.name + for class_with_examples in self._labels_with_examples + ) + unknown_labels = input.labels - available_labels + if unknown_labels: + raise ValueError(f"Got unexpected labels: {', '.join(unknown_labels)}.") + labels = list(input.labels) # converting to list to preserve order + results_per_label = [ + self._label_search(input.chunk, label, logger) for label in labels + ] + scores = self._calculate_scores(results_per_label) + return ClassifyOutput( + scores={l: Probability(s) for l, s in zip(labels, scores)} + )
+ + + def _labels_with_examples_to_documents( + self, classes_with_examples: Sequence[LabelWithExamples] + ) -> Sequence[Document]: + return [ + Document( + text=e, metadata={self.METADATA_LABEL_NAME: class_with_examples.name} + ) + for class_with_examples in classes_with_examples + for e in class_with_examples.examples + ] + + def _label_search( + self, chunk: Chunk, label: str, logger: DebugLogger + ) -> SearchOutput: + search_input = FilterSearchInput( + query=chunk, + filter=models.Filter( + must=[ + models.FieldCondition( + key=f"metadata.{self.METADATA_LABEL_NAME}", + match=models.MatchValue(value=label), + ), + ] + ), + ) + return self._filter_search.run(search_input, logger) + + def _calculate_scores( + self, results_per_label: Sequence[SearchOutput] + ) -> Sequence[float]: + return [ + statistics.mean(r.score for r in r_per_l.results) + for r_per_l in results_per_label + ]
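+
+# Worked example (illustrative) of the scoring above: with
+# EmbeddingBasedClassifyScoring.MEAN_TOP_5, the retriever returns the top 5
+# example matches per label and the label score is their mean, e.g.
+#
+#   top_5_scores = [0.81, 0.78, 0.74, 0.70, 0.62]
+#   # label score = statistics.mean(top_5_scores) = 0.73
+#
+# Scores of different labels therefore need not sum to 1.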
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/use_cases/classify/single_label_classify.html b/_modules/intelligence_layer/use_cases/classify/single_label_classify.html new file mode 100644 index 000000000..250ba70f4 --- /dev/null +++ b/_modules/intelligence_layer/use_cases/classify/single_label_classify.html @@ -0,0 +1,338 @@ + + + + + + + intelligence_layer.use_cases.classify.single_label_classify — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.use_cases.classify.single_label_classify

+import math
+import re
+from typing import (
+    Iterable,
+    Mapping,
+    Optional,
+    Sequence,
+)
+
+from aleph_alpha_client import (
+    Client,
+    PromptTemplate,
+    Prompt,
+)
+
+from intelligence_layer.core.complete import (
+    Complete,
+)
+from intelligence_layer.core.echo import EchoInput, EchoTask, TokenWithProb
+from intelligence_layer.core.logger import DebugLogger
+from intelligence_layer.core.task import Probability, Token
+from intelligence_layer.use_cases.classify.classify import (
+    Classify,
+    ClassifyInput,
+    ClassifyOutput,
+)
+
+
+
+[docs] +def to_aa_tokens_prompt(tokens: Sequence[Token]) -> Prompt: + return Prompt.from_tokens([token.token_id for token in tokens])
+ + + +
+[docs]
+class SingleLabelClassify(Classify):
+    """Task that classifies a given input text with one of the given classes.
+
+    The input contains a complete set of all possible labels. The output will return a score for
+    each possible label. All scores will add up to 1 and are relative to each other. The highest
+    score is given to the most likely class.
+
+    This methodology works best for classes that are easily understood, and don't require an
+    explanation or examples.
+
+    Args:
+        client: Aleph Alpha client instance for running model related API calls.
+
+    Attributes:
+        PROMPT_TEMPLATE: The prompt template used for answering the question.
+            'text' and 'labels' will be inserted here.
+        MODEL: A valid Aleph Alpha model name.
+
+    Example:
+        >>> client = Client(token="AA_TOKEN")
+        >>> task = SingleLabelClassify(client)
+        >>> input = ClassifyInput(
+                chunk="This is a happy text.",
+                labels={"positive", "negative"}
+            )
+        >>> logger = InMemoryLogger(name="Classify")
+        >>> output = task.run(input, logger)
+        >>> print(output.scores["positive"])
+        0.9
+    """
+
+    PROMPT_TEMPLATE: str = """### Instruction:
+Identify a class that describes the text adequately.
+Reply with only the class label.
+
+### Input:
+{{text}}
+
+### Response:"""
+    MODEL: str = "luminous-base-control"
+    _client: Client
+
+    def __init__(self, client: Client) -> None:
+        super().__init__()
+        self._client = client
+        self._completion_task = Complete(client)
+        self._echo_task = EchoTask(client)
+
+[docs] + def run(self, input: ClassifyInput, logger: DebugLogger) -> ClassifyOutput: + log_probs_per_label = self._log_probs_per_label( + text_to_classify=input.chunk, + labels=input.labels, + model=self.MODEL, + logger=logger, + ) + logger.log("Log probs per label", log_probs_per_label) + normalized_probs_per_label = self._normalize(log_probs_per_label, logger) + scores = self._compute_scores(normalized_probs_per_label) + return ClassifyOutput( + scores=scores, + )
+ + + def _log_probs_per_label( + self, + text_to_classify: str, + labels: frozenset[str], + model: str, + logger: DebugLogger, + ) -> Mapping[str, Sequence[TokenWithProb]]: + prompt = PromptTemplate(template_str=self.PROMPT_TEMPLATE).to_prompt( + text=text_to_classify + ) + inputs = ( + EchoInput( + prompt=prompt, + expected_completion=self._prepare_label_for_echo_task(label), + model=model, + ) + for label in labels + ) + outputs = self._echo_task.run_concurrently(inputs, logger) + return { + label: output.tokens_with_log_probs + for label, output in zip(labels, outputs) + } + + def _prepare_label_for_echo_task(self, label: str) -> str: + label = label if re.match(r"^\s+", label) else f" {label}" + return label + "<|endoftext|>" + + def _compute_scores( + self, + normalized_probs_per_score: Mapping[str, Sequence[TokenWithProb]], + ) -> Mapping[str, Probability]: + return { + label: Probability( + math.prod(token_with_prob.prob for token_with_prob in tokens_with_probs) + ) + for label, tokens_with_probs in normalized_probs_per_score.items() + } + + def _normalize( + self, + log_probs_per_label: Mapping[str, Sequence[TokenWithProb]], + logger: DebugLogger, + ) -> Mapping[str, Sequence[TokenWithProb]]: + node = TreeNode() + for log_probs in log_probs_per_label.values(): + node.insert_path(log_probs) + + node.normalize_probs() + normalized_probs = { + label: list( + node.path( + token_with_prob.token + for token_with_prob in log_probs_per_label[label] + ) + ) + for label in log_probs_per_label + } + logger.log("Normalized Probs", normalized_probs) + return normalized_probs
+ + + +
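+
+# Worked example (illustrative) of the normalization above for two
+# single-token labels sharing the same position:
+#
+#   # raw log-probs: " positive" -> -0.2, " negative" -> -1.8
+#   # exp:           0.8187 and 0.1653; sum = 0.9840
+#   # normalized:    0.832 and 0.168 -> the returned scores sum to 1.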
+[docs]
+class TreeNode:
+    """A node in a tree of token sequences, used to normalize the probabilities of sibling tokens."""
+
+    def __init__(
+        self, token: Optional[Token] = None, prob: Optional[Probability] = None
+    ):
+        self.token = token
+        self.prob = prob
+        self.normalized_prob: Optional[Probability] = None
+        self.children: list[TreeNode] = []
+
+[docs] + def find_child(self, token: Token) -> Optional["TreeNode"]: + return next((child for child in self.children if child.token == token), None)
+ + +
+[docs]
+    def insert_without_calculation(self, path: Sequence[TokenWithProb]) -> None:
+        """Inserts a path into the tree without changing the original probability.
+
+        Temporarily here until we change this data structure to be more versatile."""
+        if not path:
+            return
+        token_with_prob = path[0]
+        child = self.find_child(token_with_prob.token)
+        if child is None:
+            child = TreeNode(token_with_prob.token, Probability(token_with_prob.prob))
+            self.children.append(child)
+
+        child.insert_without_calculation(path[1:])
+ + +
+[docs] + def insert_path(self, path: Sequence[TokenWithProb]) -> None: + if not path: + return + token_with_prob = path[0] + prob = Probability(math.exp(token_with_prob.prob)) + + child = self.find_child(token_with_prob.token) + if child is None: + child = TreeNode(token_with_prob.token, prob) + self.children.append(child) + + child.insert_path(path[1:])
+ + +
+[docs] + def normalize_probs(self) -> None: + total_prob = sum( + child.prob for child in self.children if child.prob is not None + ) + for child in self.children: + if child.prob is not None: + child.normalized_prob = Probability(child.prob / total_prob) + child.normalize_probs()
+ + +
+[docs] + def path(self, tokens: Iterable[Token]) -> Iterable[TokenWithProb]: + node = self + for token in tokens: + child = node.find_child(token) + assert child + node = child + assert node.token and node.normalized_prob + yield TokenWithProb(token=node.token, prob=node.normalized_prob)
+
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/use_cases/qa/long_context_qa.html b/_modules/intelligence_layer/use_cases/qa/long_context_qa.html new file mode 100644 index 000000000..bdb69b164 --- /dev/null +++ b/_modules/intelligence_layer/use_cases/qa/long_context_qa.html @@ -0,0 +1,204 @@ + + + + + + + intelligence_layer.use_cases.qa.long_context_qa — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.use_cases.qa.long_context_qa

+from typing import Sequence
+
+from aleph_alpha_client import Client
+from pydantic import BaseModel
+from semantic_text_splitter import HuggingFaceTextSplitter
+from intelligence_layer.connectors.retrievers.base_retriever import Document
+
+from intelligence_layer.use_cases.qa.multiple_chunk_qa import (
+    MultipleChunkQa,
+    MultipleChunkQaInput,
+    MultipleChunkQaOutput,
+)
+from intelligence_layer.connectors.retrievers.in_memory_retriever import (
+    InMemoryRetriever,
+)
+from intelligence_layer.use_cases.search.search import Search, SearchInput
+from intelligence_layer.core.task import Chunk, Task
+from intelligence_layer.core.logger import DebugLogger
+
+
+
+[docs] +class LongContextQaInput(BaseModel): + """The input for a `LongContextQa` task. + + Attributes: + text: Text of arbitrary length on the basis of which the question is to be answered. + question: The question for the text. + """ + + text: str + question: str
+ + + +
+[docs]
+class LongContextQa(Task[LongContextQaInput, MultipleChunkQaOutput]):
+    """Answer a question on the basis of a (lengthy) document.
+
+    Best for answering a question on the basis of a long document, where the length
+    of the text exceeds the context length of a model (e.g. 2048 tokens for the luminous models).
+
+    Note:
+        - Creates an instance of `InMemoryRetriever` on the fly.
+        - `model` provided should be a control-type model.
+
+    Args:
+        client: Aleph Alpha client instance for running model related API calls.
+        max_tokens_in_chunk: The input text will be split into chunks to fit the context window.
+            Used to tweak the length of the chunks.
+        k: The number of top relevant chunks to retrieve.
+        model: A valid Aleph Alpha model name.
+
+    Example:
+        >>> client = Client(os.getenv("AA_TOKEN"))
+        >>> task = LongContextQa(client)
+        >>> input = LongContextQaInput(text="Lengthy text goes here...", question="Where does the text go?")
+        >>> logger = InMemoryDebugLogger(name="Long Context QA")
+        >>> output = task.run(input, logger)
+    """
+
+    def __init__(
+        self,
+        client: Client,
+        max_tokens_in_chunk: int = 512,
+        k: int = 4,
+        model: str = "luminous-supreme-control",
+    ):
+        super().__init__()
+        self._client = client
+        self._model = model
+        self._max_tokens_in_chunk = max_tokens_in_chunk
+        self._tokenizer = self._client.tokenizer(model)
+        self._splitter = HuggingFaceTextSplitter(self._tokenizer, trim_chunks=True)
+        self._multi_chunk_qa = MultipleChunkQa(self._client, self._model)
+        self._k = k
+
+[docs] + def run( + self, input: LongContextQaInput, logger: DebugLogger + ) -> MultipleChunkQaOutput: + chunks = self._chunk(input.text) + logger.log("chunks", chunks) + retriever = InMemoryRetriever( + self._client, + documents=[Document(text=c) for c in chunks], + k=self._k, + threshold=0.5, + ) + search_output = Search(retriever).run(SearchInput(query=input.question), logger) + multi_chunk_qa_input = MultipleChunkQaInput( + chunks=[Chunk(result.document.text) for result in search_output.results], + question=input.question, + ) + qa_output = self._multi_chunk_qa.run(multi_chunk_qa_input, logger) + return qa_output
+ + + def _chunk(self, text: str) -> Sequence[Chunk]: + return [ + Chunk(t) for t in self._splitter.chunks(text, self._max_tokens_in_chunk) + ]
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/use_cases/qa/multiple_chunk_qa.html b/_modules/intelligence_layer/use_cases/qa/multiple_chunk_qa.html new file mode 100644 index 000000000..4b07ff519 --- /dev/null +++ b/_modules/intelligence_layer/use_cases/qa/multiple_chunk_qa.html @@ -0,0 +1,275 @@ + + + + + + + intelligence_layer.use_cases.qa.multiple_chunk_qa — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.use_cases.qa.multiple_chunk_qa

+from typing import Iterable, Optional, Sequence
+
+from aleph_alpha_client import Client
+from pydantic import BaseModel
+
+from intelligence_layer.core.complete import (
+    Instruct,
+    InstructInput,
+    InstructOutput,
+)
+from intelligence_layer.use_cases.qa.single_chunk_qa import (
+    SingleChunkQaInput,
+    SingleChunkQaOutput,
+    SingleChunkQa,
+)
+from intelligence_layer.core.task import Chunk, Task
+from intelligence_layer.core.logger import DebugLogger
+
+
+
+[docs]
+class MultipleChunkQaInput(BaseModel):
+    """The input for a `MultipleChunkQa` task.
+
+    Attributes:
+        chunks: The list of chunks that will be used to answer the question.
+            Can be an arbitrarily long list of chunks.
+        question: The question that will be answered based on the chunks.
+    """
+
+    chunks: Sequence[Chunk]
+    question: str
+ + + +
+[docs]
+class Subanswer(BaseModel):
+    """Individual answer based on just one of the multiple chunks.
+
+    Attributes:
+        answer: The answer generated by the task for this chunk.
+        chunk: Piece of the original text that answer is based on.
+        highlights: The specific sentences that explain the answer the most.
+            These are generated by the `TextHighlight` Task.
+    """
+
+    answer: str
+    chunk: Chunk
+    highlights: Sequence[str]
+ + + +
+[docs] +class MultipleChunkQaOutput(BaseModel): + """The output of a `MultipleChunkQa` task. + + Attributes: + answer: The answer generated by the task. Can be a string or None (if no answer was found). + subanswers: All the subanswers used to generate the answer. + """ + + answer: Optional[str] + subanswers: Sequence[Subanswer]
+ + + +
+[docs] +class MultipleChunkQa(Task[MultipleChunkQaInput, MultipleChunkQaOutput]): + """Answer a question on the basis of a list of text chunks. + + Uses Aleph Alpha models to generate a natural language answer based on multiple text chunks. + Best for longer texts that are already split into smaller units (chunks). + Relies on SingleChunkQa to generate answers for each chunk and then merges the answers into a single final answer. + Includes logic to return 'answer = None' if the language model determines that the question cannot be + reliably answered on the basis of the chunks. + + Note: + `model` provided should be a control-type model. + + Args: + client: Aleph Alpha client instance for running model related API calls. + model: A valid Aleph Alpha model name. + + Attributes: + MERGE_ANSWERS_INSTRUCTION: The instruction template used for combining multiple answers into one. + + Example: + >>> client = Client(token="AA_TOKEN") + >>> task = MultipleChunkQa(client) + >>> input = MultipleChunkQaInput( + >>> chunks=["Tina does not like pizza.", "Mike is a big fan of pizza."], + >>> question="Who likes pizza?" + >>> ) + >>> logger = InMemoryLogger(name="Multiple Chunk QA") + >>> output = task.run(input, logger) + >>> print(output.answer) + Mike likes pizza. + """ + + MERGE_ANSWERS_INSTRUCTION = """You will be given a number of Answers to a Question. Based on them, generate a single final answer. +Condense multiple answers into a single answer. Rely only on the provided answers. Don't use the world's knowledge. The answer should combine the individual answers. If the answers contradict each other, e.g., one saying that the colour is green and the other saying that the colour is black, say that there are contradicting answers saying the colour is green or the colour is black.""" + + def __init__( + self, + client: Client, + model: str = "luminous-supreme-control", + ): + super().__init__() + self._client = client + self._instruction = Instruct(client) + self._single_chunk_qa = SingleChunkQa(client, model) + self._model = model + +
+[docs] + def run( + self, input: MultipleChunkQaInput, logger: DebugLogger + ) -> MultipleChunkQaOutput: + qa_outputs = self._single_chunk_qa.run_concurrently( + ( + SingleChunkQaInput(question=input.question, chunk=chunk) + for chunk in input.chunks + ), + logger, + ) + final_answer = self._merge_answers(input.question, qa_outputs, logger) + + return MultipleChunkQaOutput( + answer=final_answer, + subanswers=[ + Subanswer( + answer=qa_output.answer, + chunk=chunk, + highlights=qa_output.highlights, + ) + for qa_output, chunk in zip(qa_outputs, input.chunks) + if qa_output.answer + ], + )
+ + + def _merge_answers( + self, + question: str, + qa_outputs: Iterable[SingleChunkQaOutput], + logger: DebugLogger, + ) -> Optional[str]: + answers = [output.answer for output in qa_outputs if output.answer] + if len(answers) == 0: + return None + elif len(answers) == 1: + return answers[0] + + joined_answers = "\n".join(answers) + return self._instruct( + f"""Question: {question} + +Answers: +{joined_answers}""", + logger, + ).response + + def _instruct(self, input: str, logger: DebugLogger) -> InstructOutput: + return self._instruction.run( + InstructInput( + instruction=self.MERGE_ANSWERS_INSTRUCTION, + input=input, + model=self._model, + response_prefix="\nFinal answer:", + ), + logger, + )
+ +
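+
+# Illustrative sketch (not part of the module): for two subanswers, the merge
+# step above renders an instruction input of this shape before completion:
+#
+#   # Question: Who likes pizza?
+#   #
+#   # Answers:
+#   # Mike likes pizza.
+#   # Tina does not like pizza.
+#
+# combined with MERGE_ANSWERS_INSTRUCTION and the "\nFinal answer:" response prefix.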
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/use_cases/qa/retriever_based_qa.html b/_modules/intelligence_layer/use_cases/qa/retriever_based_qa.html new file mode 100644 index 000000000..b71f9348b --- /dev/null +++ b/_modules/intelligence_layer/use_cases/qa/retriever_based_qa.html @@ -0,0 +1,183 @@ + + + + + + + intelligence_layer.use_cases.qa.retriever_based_qa — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.use_cases.qa.retriever_based_qa

+from aleph_alpha_client import (
+    Client,
+)
+from pydantic import BaseModel
+
+from intelligence_layer.use_cases.qa.multiple_chunk_qa import (
+    MultipleChunkQa,
+    MultipleChunkQaInput,
+    MultipleChunkQaOutput,
+)
+from intelligence_layer.connectors.retrievers.base_retriever import BaseRetriever
+from intelligence_layer.use_cases.search.search import Search, SearchInput
+from intelligence_layer.core.task import Chunk, Task
+from intelligence_layer.core.logger import DebugLogger
+
+
+
+[docs] +class RetrieverBasedQaInput(BaseModel): + """The input for a `RetrieverBasedQa` task. + + Attributes: + question: The question to be answered based on the documents accessed + by the retriever. + """ + + question: str
+ + + +
+[docs]
+class RetrieverBasedQa(Task[RetrieverBasedQaInput, MultipleChunkQaOutput]):
+    """Answer a question based on documents found by a retriever.
+
+    `RetrieverBasedQa` is a task that answers a question based on a set of documents.
+    Relies on some retriever of type `BaseRetriever` that has the ability to access texts.
+
+    Note:
+        `model` provided should be a control-type model.
+
+    Args:
+        client: Aleph Alpha client instance for running model related API calls.
+        retriever: Used to access and return a set of texts.
+        model: A valid Aleph Alpha model name.
+
+    Example:
+        >>> token = os.getenv("AA_TOKEN")
+        >>> client = Client(token)
+        >>> document_index = DocumentIndex(token)
+        >>> retriever = DocumentIndexRetriever(document_index, "my_namespace", "ancient_facts_collection", 3)
+        >>> task = RetrieverBasedQa(client, retriever)
+        >>> input_data = RetrieverBasedQaInput(question="When was Rome founded?")
+        >>> logger = InMemoryDebugLogger(name="Retriever Based QA")
+        >>> output = task.run(input_data, logger)
+        >>> print(output.answer)
+        Rome was founded in 753 BC.
+    """
+
+    def __init__(
+        self,
+        client: Client,
+        retriever: BaseRetriever,
+        model: str = "luminous-supreme-control",
+    ):
+        super().__init__()
+        self._client = client
+        self._model = model
+        self._search = Search(retriever)
+        self._multi_chunk_qa = MultipleChunkQa(self._client, self._model)
+
+[docs] + def run( + self, input: RetrieverBasedQaInput, logger: DebugLogger + ) -> MultipleChunkQaOutput: + search_output = self._search.run(SearchInput(query=input.question), logger) + multi_chunk_qa_input = MultipleChunkQaInput( + chunks=[Chunk(result.document.text) for result in search_output.results], + question=input.question, + ) + return self._multi_chunk_qa.run(multi_chunk_qa_input, logger)
+
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/use_cases/qa/single_chunk_qa.html b/_modules/intelligence_layer/use_cases/qa/single_chunk_qa.html new file mode 100644 index 000000000..929333da7 --- /dev/null +++ b/_modules/intelligence_layer/use_cases/qa/single_chunk_qa.html @@ -0,0 +1,261 @@ + + + + + + + intelligence_layer.use_cases.qa.single_chunk_qa — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.use_cases.qa.single_chunk_qa

+from typing import Optional, Sequence
+
+from aleph_alpha_client import Client
+from pydantic import BaseModel
+
+from intelligence_layer.core.complete import (
+    Instruct,
+    InstructInput,
+    InstructOutput,
+)
+from intelligence_layer.core.prompt_template import (
+    PromptWithMetadata,
+)
+from intelligence_layer.core.text_highlight import (
+    TextHighlight,
+    TextHighlightInput,
+)
+from intelligence_layer.core.task import Chunk, Task
+from intelligence_layer.core.logger import DebugLogger
+
+
+
+[docs]
+class SingleChunkQaInput(BaseModel):
+    """The input for a `SingleChunkQa` task.
+
+    Attributes:
+        chunk: The (short) text to be asked about. Usually the length of one or a few paragraphs.
+            Can't be longer than the context length of the model used minus the size of the system prompt.
+        question: The question to be asked about the chunk.
+    """
+
+    chunk: Chunk
+    question: str
+ + + +
+[docs] +class SingleChunkQaOutput(BaseModel): + """The output of a `SingleChunkQa` task. + + Attributes: + answer: The answer generated by the task. Can be a string or None (if no answer was found). + highlights: Highlights indicating which parts of the chunk contributed to the answer. + Each highlight is a quote from the text. + """ + + answer: Optional[str] + highlights: Sequence[str]
+ + + +
+[docs] +class SingleChunkQa(Task[SingleChunkQaInput, SingleChunkQaOutput]): + """Answer a question on the basis of one chunk. + + Uses Aleph Alpha models to generate a natural language answer for a text chunk given a question. + Will answer `None` if the language model determines that the question cannot be answered on the + basis of the text. + + Note: + `model` provided should be a control-type model. + + Args: + client: Aleph Alpha client instance for running model related API calls. + model: A valid Aleph Alpha model name. + + Attributes: + PROMPT_TEMPLATE_STR: The prompt template used for answering the question. + Includes liquid logic interpreted by 'PromptTemplate' specifically for generating + explainability-based highlights using `TextHighlight`. + NO_ANSWER_STR: The string to be generated by the model in case no answer can be found. + + Example: + >>> client = Client(os.getenv("AA_TOKEN")) + >>> task = SingleChunkQa(client) + >>> input = SingleChunkQaInput( + >>> chunk="Tina does not like pizza. However, Mike does.", + >>> question="Who likes pizza?" + >>> ) + >>> logger = InMemoryLogger(name="Single Chunk QA") + >>> output = task.run(input, logger) + >>> print(output.answer) + Mike likes pizza. + """ + + PROMPT_TEMPLATE_STR = """### Instruction: +{{question}} +If there's no answer, say "{{no_answer_text}}". + +### Input: +{% promptrange text %}{{text}}{% endpromptrange %} + +### Response:""" + NO_ANSWER_STR = "NO_ANSWER_IN_TEXT" + + def __init__( + self, + client: Client, + model: str = "luminous-supreme-control", + ): + super().__init__() + self._client = client + self._model = model + self._instruction = Instruct(client) + self._text_highlight = TextHighlight(client) + +
+[docs] + def run( + self, input: SingleChunkQaInput, logger: DebugLogger + ) -> SingleChunkQaOutput: + output = self._instruct( + f"""{input.question} +If there's no answer, say "{self.NO_ANSWER_STR}".""", + input.chunk, + logger, + ) + answer = self._no_answer_to_none(output.response.strip()) + highlights = ( + self._get_highlights( + output.prompt_with_metadata, + output.response, + logger, + ) + if answer + else [] + ) + return SingleChunkQaOutput( + answer=answer, + highlights=highlights, + )
+ + + def _instruct( + self, instruction: str, input: str, logger: DebugLogger + ) -> InstructOutput: + return self._instruction.run( + InstructInput(instruction=instruction, input=input, model=self._model), + logger, + ) + + def _get_highlights( + self, + prompt_with_metadata: PromptWithMetadata, + completion: str, + logger: DebugLogger, + ) -> Sequence[str]: + highlight_input = TextHighlightInput( + prompt_with_metadata=prompt_with_metadata, + target=completion, + model=self._model, + focus_ranges=frozenset({"input"}), + ) + highlight_output = self._text_highlight.run(highlight_input, logger) + return [h.text for h in highlight_output.highlights if h.score > 0] + + def _no_answer_to_none(self, completion: str) -> Optional[str]: + return completion if completion != self.NO_ANSWER_STR else None
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/use_cases/search/filter_search.html b/_modules/intelligence_layer/use_cases/search/filter_search.html new file mode 100644 index 000000000..cee4a106a --- /dev/null +++ b/_modules/intelligence_layer/use_cases/search/filter_search.html @@ -0,0 +1,174 @@ + + + + + + + intelligence_layer.use_cases.search.filter_search — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.use_cases.search.filter_search

+from pydantic import BaseModel
+from qdrant_client.http.models.models import Filter
+
+from intelligence_layer.connectors.retrievers.in_memory_retriever import (
+    InMemoryRetriever,
+)
+from intelligence_layer.core.task import Task
+from intelligence_layer.core.logger import DebugLogger
+from intelligence_layer.use_cases.search.search import SearchOutput
+
+
+
+[docs] +class FilterSearchInput(BaseModel): + """The input for a `FilterSearch` task. + + Attributes: + query: The text to be searched with. + filter: Conditions to filter by as offered by Qdrant. + """ + + query: str + filter: Filter
+ + + +
+[docs]
+class FilterSearch(Task[FilterSearchInput, SearchOutput]):
+    """Performs search to find documents using Qdrant filtering methods.
+
+    Given a query, this task will utilize a retriever to fetch relevant text search results.
+    Contrary to `Search`, this `Task` offers the option to filter.
+
+    Args:
+        in_memory_retriever: Implements logic to retrieve matching texts to the query.
+
+    Example:
+        >>> client = Client(os.getenv("AA_TOKEN"))
+        >>> documents = [
+        >>>     Document(
+        >>>         text="West and East Germany reunited in 1990.",
+        >>>         metadata={"title": "Germany"}
+        >>>     )
+        >>> ]
+        >>> retriever = InMemoryRetriever(client, documents)
+        >>> task = FilterSearch(retriever)
+        >>> input = FilterSearchInput(
+        >>>     query="When did East and West Germany reunite?",
+        >>>     filter=models.Filter(
+        >>>         must=[
+        >>>             models.FieldCondition(
+        >>>                 key="metadata.title",
+        >>>                 match=models.MatchValue(value="Germany"),
+        >>>             ),
+        >>>         ]
+        >>>     )
+        >>> )
+        >>> logger = InMemoryLogger(name="Filter Search")
+        >>> output = task.run(input, logger)
+    """
+
+    def __init__(self, in_memory_retriever: InMemoryRetriever):
+        super().__init__()
+        self._in_memory_retriever = in_memory_retriever
+
+[docs] + def run(self, input: FilterSearchInput, logger: DebugLogger) -> SearchOutput: + results = self._in_memory_retriever.get_filtered_documents_with_scores( + input.query, input.filter + ) + return SearchOutput(results=results)
+
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/use_cases/search/search.html b/_modules/intelligence_layer/use_cases/search/search.html new file mode 100644 index 000000000..caaa3de15 --- /dev/null +++ b/_modules/intelligence_layer/use_cases/search/search.html @@ -0,0 +1,173 @@ + + + + + + + intelligence_layer.use_cases.search.search — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.use_cases.search.search

+from typing import Sequence
+
+from pydantic import BaseModel
+
+from intelligence_layer.connectors.retrievers.base_retriever import (
+    BaseRetriever,
+    SearchResult,
+)
+from intelligence_layer.core.task import Task
+from intelligence_layer.core.logger import DebugLogger
+
+
+
+[docs]
+class SearchInput(BaseModel):
+    """The input for a `Search` task.
+
+    Attributes:
+        query: The text to search with.
+    """
+
+    query: str
+ + + +
+[docs] +class SearchOutput(BaseModel): + """The output of a `Search` task. + + Attributes: + results: Each result contains a text and corresponding score. + """ + + results: Sequence[SearchResult]
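The `Search` task itself is not shown in this rendered excerpt. For orientation, a minimal sketch of how a task could wire `SearchInput` to `SearchOutput` through a retriever; the `get_relevant_documents_with_scores` method name is an assumption, so consult `BaseRetriever` for the actual interface:

class Search(Task[SearchInput, SearchOutput]):
    """Sketch only: fetch documents relevant to a query via a retriever."""

    def __init__(self, retriever: BaseRetriever):
        super().__init__()
        self._retriever = retriever

    def run(self, input: SearchInput, logger: DebugLogger) -> SearchOutput:
        # Assumed retriever method; the real BaseRetriever API may differ.
        results = self._retriever.get_relevant_documents_with_scores(input.query)
        return SearchOutput(results=results)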
+ + + + + +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_modules/intelligence_layer/use_cases/summarize/summarize.html b/_modules/intelligence_layer/use_cases/summarize/summarize.html new file mode 100644 index 000000000..5744270a8 --- /dev/null +++ b/_modules/intelligence_layer/use_cases/summarize/summarize.html @@ -0,0 +1,224 @@ + + + + + + + intelligence_layer.use_cases.summarize.summarize — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for intelligence_layer.use_cases.summarize.summarize

+from typing import Sequence
+
+from aleph_alpha_client import Client
+from pydantic import BaseModel
+
+from intelligence_layer.core.complete import (
+    Instruct,
+    InstructInput,
+    InstructOutput,
+)
+from intelligence_layer.core.prompt_template import PromptWithMetadata
+from intelligence_layer.core.task import Chunk, Task
+from intelligence_layer.core.logger import DebugLogger
+from intelligence_layer.core.text_highlight import TextHighlight, TextHighlightInput
+
+
+
+[docs] +class SummarizeInput(BaseModel): + """The input for a `Summarize` task. + + Attributes: + chunk: The text chunk to be summarized. + """ + + chunk: Chunk
+ + + +
+[docs] +class SummarizeOutput(BaseModel): + """The output of a `Summarize` task. + + Attributes: + summary: The summary generated by the task. + highlights: Highlights indicating which parts of the chunk contributed to the summary. + Each highlight is a quote from the text. + """ + + summary: str + highlights: Sequence[str]
+ + + +
+[docs]
+class ShortBodySummarize(Task[SummarizeInput, SummarizeOutput]):
+    """Summarizes a section into a short text.
+
+    Generates a short natural language summary of the input.
+    It also returns highlights explaining which parts of the input contributed strongly to the summary.
+
+    Note:
+        The `model` provided should be a control-type model.
+
+    Args:
+        client: Aleph Alpha client instance for running model-related API calls.
+        model: A valid Aleph Alpha model name.
+
+    Attributes:
+        MAXIMUM_RESPONSE_TOKENS: The maximum number of tokens the summary will contain.
+        INSTRUCTION: The verbal instruction sent to the model to make it generate the summary.
+
+    Example:
+        >>> client = Client(os.getenv("AA_TOKEN"))
+        >>> task = ShortBodySummarize(client)
+        >>> input = SummarizeInput(
+        >>>     chunk="This is a story about pizza. Tina hates pizza. However, Mike likes it. Pete strongly believes that pizza is the best thing to exist."
+        >>> )
+        >>> logger = InMemoryLogger(name="Short Body Summarize")
+        >>> output = task.run(input, logger)
+        >>> print(output.summary)
+        Tina does not like pizza, but Mike and Pete do.
+    """
+
+    MAXIMUM_RESPONSE_TOKENS = 128
+    INSTRUCTION = "Summarize in just one or two sentences."
+    _client: Client
+
+    def __init__(self, client: Client, model: str = "luminous-supreme-control") -> None:
+        super().__init__()
+        self._client = client
+        self._model = model
+        self._instruction = Instruct(client)
+        self._text_highlight = TextHighlight(client)
+
+[docs] + def run(self, input: SummarizeInput, logger: DebugLogger) -> SummarizeOutput: + instruction_output = self._instruct(input.chunk, logger) + highlights = self._get_highlights( + instruction_output.prompt_with_metadata, instruction_output.response, logger + ) + return SummarizeOutput( + summary=instruction_output.response, highlights=highlights + )
+ + + def _instruct(self, input: str, logger: DebugLogger) -> InstructOutput: + return self._instruction.run( + InstructInput( + instruction=self.INSTRUCTION, + input=input, + maximum_response_tokens=self.MAXIMUM_RESPONSE_TOKENS, + model=self._model, + ), + logger, + ) + + def _get_highlights( + self, + prompt_with_metadata: PromptWithMetadata, + completion: str, + logger: DebugLogger, + ) -> Sequence[str]: + highlight_input = TextHighlightInput( + prompt_with_metadata=prompt_with_metadata, + target=completion, + model=self._model, + focus_ranges=frozenset({"input"}), + ) + highlight_output = self._text_highlight.run(highlight_input, logger) + return [h.text for h in highlight_output.highlights if h.score > 0]
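Because `SummarizeInput.chunk` is typed as `Chunk` (imported from `intelligence_layer.core.task`), longer texts need to be split into chunks before this task is run. A minimal end-to-end usage sketch, assuming `Chunk` is a thin string type and that `InMemoryLogger` lives in `intelligence_layer.core.logger`:

import os

from aleph_alpha_client import Client

from intelligence_layer.core.logger import InMemoryLogger  # assumed location
from intelligence_layer.core.task import Chunk

client = Client(os.getenv("AA_TOKEN"))
task = ShortBodySummarize(client)

# Chunk is assumed to be a NewType over str, so a short text can be cast directly.
input = SummarizeInput(chunk=Chunk("Tina hates pizza. Mike and Pete like it."))
output = task.run(input, InMemoryLogger(name="Short Body Summarize"))

print(output.summary)
for quote in output.highlights:
    # Each highlight is a verbatim quote from the chunk that drove the summary.
    print("-", quote)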
+ +
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/_sources/index.rst.txt b/_sources/index.rst.txt new file mode 100644 index 000000000..e7cfe8c60 --- /dev/null +++ b/_sources/index.rst.txt @@ -0,0 +1,20 @@ +.. Intelligence Layer documentation master file, created by + sphinx-quickstart on Fri Oct 27 14:17:00 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to Intelligence Layer's documentation! +============================================== + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + modules + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/_sources/intelligence_layer.connectors.retrievers.rst.txt b/_sources/intelligence_layer.connectors.retrievers.rst.txt new file mode 100644 index 000000000..8269b79bb --- /dev/null +++ b/_sources/intelligence_layer.connectors.retrievers.rst.txt @@ -0,0 +1,37 @@ +intelligence\_layer.connectors.retrievers package +================================================= + +Submodules +---------- + +intelligence\_layer.connectors.retrievers.base\_retriever module +---------------------------------------------------------------- + +.. automodule:: intelligence_layer.connectors.retrievers.base_retriever + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.connectors.retrievers.document\_index\_retriever module +--------------------------------------------------------------------------- + +.. automodule:: intelligence_layer.connectors.retrievers.document_index_retriever + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.connectors.retrievers.in\_memory\_retriever module +---------------------------------------------------------------------- + +.. automodule:: intelligence_layer.connectors.retrievers.in_memory_retriever + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: intelligence_layer.connectors.retrievers + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/intelligence_layer.connectors.rst.txt b/_sources/intelligence_layer.connectors.rst.txt new file mode 100644 index 000000000..02fc15ba3 --- /dev/null +++ b/_sources/intelligence_layer.connectors.rst.txt @@ -0,0 +1,29 @@ +intelligence\_layer.connectors package +====================================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + intelligence_layer.connectors.retrievers + +Submodules +---------- + +intelligence\_layer.connectors.document\_index module +----------------------------------------------------- + +.. automodule:: intelligence_layer.connectors.document_index + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: intelligence_layer.connectors + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/intelligence_layer.core.rst.txt b/_sources/intelligence_layer.core.rst.txt new file mode 100644 index 000000000..ae747d3de --- /dev/null +++ b/_sources/intelligence_layer.core.rst.txt @@ -0,0 +1,77 @@ +intelligence\_layer.core package +================================ + +Submodules +---------- + +intelligence\_layer.core.complete module +---------------------------------------- + +.. automodule:: intelligence_layer.core.complete + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.core.echo module +------------------------------------ + +.. 
automodule:: intelligence_layer.core.echo + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.core.evaluator module +----------------------------------------- + +.. automodule:: intelligence_layer.core.evaluator + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.core.explain module +--------------------------------------- + +.. automodule:: intelligence_layer.core.explain + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.core.logger module +-------------------------------------- + +.. automodule:: intelligence_layer.core.logger + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.core.prompt\_template module +------------------------------------------------ + +.. automodule:: intelligence_layer.core.prompt_template + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.core.task module +------------------------------------ + +.. automodule:: intelligence_layer.core.task + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.core.text\_highlight module +----------------------------------------------- + +.. automodule:: intelligence_layer.core.text_highlight + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: intelligence_layer.core + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/intelligence_layer.rst.txt b/_sources/intelligence_layer.rst.txt new file mode 100644 index 000000000..a320df023 --- /dev/null +++ b/_sources/intelligence_layer.rst.txt @@ -0,0 +1,20 @@ +intelligence\_layer package +=========================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + intelligence_layer.connectors + intelligence_layer.core + intelligence_layer.use_cases + +Module contents +--------------- + +.. automodule:: intelligence_layer + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/intelligence_layer.use_cases.classify.rst.txt b/_sources/intelligence_layer.use_cases.classify.rst.txt new file mode 100644 index 000000000..ee7deaba1 --- /dev/null +++ b/_sources/intelligence_layer.use_cases.classify.rst.txt @@ -0,0 +1,37 @@ +intelligence\_layer.use\_cases.classify package +=============================================== + +Submodules +---------- + +intelligence\_layer.use\_cases.classify.classify module +------------------------------------------------------- + +.. automodule:: intelligence_layer.use_cases.classify.classify + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.use\_cases.classify.embedding\_based\_classify module +------------------------------------------------------------------------- + +.. automodule:: intelligence_layer.use_cases.classify.embedding_based_classify + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.use\_cases.classify.single\_label\_classify module +---------------------------------------------------------------------- + +.. automodule:: intelligence_layer.use_cases.classify.single_label_classify + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: intelligence_layer.use_cases.classify + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/intelligence_layer.use_cases.qa.rst.txt b/_sources/intelligence_layer.use_cases.qa.rst.txt new file mode 100644 index 000000000..925108328 --- /dev/null +++ b/_sources/intelligence_layer.use_cases.qa.rst.txt @@ -0,0 +1,45 @@ +intelligence\_layer.use\_cases.qa package +========================================= + +Submodules +---------- + +intelligence\_layer.use\_cases.qa.long\_context\_qa module +---------------------------------------------------------- + +.. automodule:: intelligence_layer.use_cases.qa.long_context_qa + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.use\_cases.qa.multiple\_chunk\_qa module +------------------------------------------------------------ + +.. automodule:: intelligence_layer.use_cases.qa.multiple_chunk_qa + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.use\_cases.qa.retriever\_based\_qa module +------------------------------------------------------------- + +.. automodule:: intelligence_layer.use_cases.qa.retriever_based_qa + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.use\_cases.qa.single\_chunk\_qa module +---------------------------------------------------------- + +.. automodule:: intelligence_layer.use_cases.qa.single_chunk_qa + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: intelligence_layer.use_cases.qa + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/intelligence_layer.use_cases.rst.txt b/_sources/intelligence_layer.use_cases.rst.txt new file mode 100644 index 000000000..ada2ac28b --- /dev/null +++ b/_sources/intelligence_layer.use_cases.rst.txt @@ -0,0 +1,21 @@ +intelligence\_layer.use\_cases package +====================================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + intelligence_layer.use_cases.classify + intelligence_layer.use_cases.qa + intelligence_layer.use_cases.search + intelligence_layer.use_cases.summarize + +Module contents +--------------- + +.. automodule:: intelligence_layer.use_cases + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/intelligence_layer.use_cases.search.rst.txt b/_sources/intelligence_layer.use_cases.search.rst.txt new file mode 100644 index 000000000..623ac89c0 --- /dev/null +++ b/_sources/intelligence_layer.use_cases.search.rst.txt @@ -0,0 +1,29 @@ +intelligence\_layer.use\_cases.search package +============================================= + +Submodules +---------- + +intelligence\_layer.use\_cases.search.filter\_search module +----------------------------------------------------------- + +.. automodule:: intelligence_layer.use_cases.search.filter_search + :members: + :undoc-members: + :show-inheritance: + +intelligence\_layer.use\_cases.search.search module +--------------------------------------------------- + +.. automodule:: intelligence_layer.use_cases.search.search + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: intelligence_layer.use_cases.search + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/intelligence_layer.use_cases.summarize.rst.txt b/_sources/intelligence_layer.use_cases.summarize.rst.txt new file mode 100644 index 000000000..88fd534ba --- /dev/null +++ b/_sources/intelligence_layer.use_cases.summarize.rst.txt @@ -0,0 +1,21 @@ +intelligence\_layer.use\_cases.summarize package +================================================ + +Submodules +---------- + +intelligence\_layer.use\_cases.summarize.summarize module +--------------------------------------------------------- + +.. automodule:: intelligence_layer.use_cases.summarize.summarize + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: intelligence_layer.use_cases.summarize + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/modules.rst.txt b/_sources/modules.rst.txt new file mode 100644 index 000000000..8dd14ac28 --- /dev/null +++ b/_sources/modules.rst.txt @@ -0,0 +1,7 @@ +src +=== + +.. toctree:: + :maxdepth: 4 + + intelligence_layer diff --git a/_static/alabaster.css b/_static/alabaster.css new file mode 100644 index 000000000..517d0b29c --- /dev/null +++ b/_static/alabaster.css @@ -0,0 +1,703 @@ +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: Georgia, serif; + font-size: 17px; + background-color: #fff; + color: #000; + margin: 0; + padding: 0; +} + + +div.document { + width: 940px; + margin: 30px auto 0 auto; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 220px; +} + +div.sphinxsidebar { + width: 220px; + font-size: 14px; + line-height: 1.5; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.body { + background-color: #fff; + color: #3E4349; + padding: 0 30px 0 30px; +} + +div.body > .section { + text-align: left; +} + +div.footer { + width: 940px; + margin: 20px auto 30px auto; + font-size: 14px; + color: #888; + text-align: right; +} + +div.footer a { + color: #888; +} + +p.caption { + font-family: inherit; + font-size: inherit; +} + + +div.relations { + display: none; +} + + +div.sphinxsidebar a { + color: #444; + text-decoration: none; + border-bottom: 1px dotted #999; +} + +div.sphinxsidebar a:hover { + border-bottom: 1px solid #999; +} + +div.sphinxsidebarwrapper { + padding: 18px 10px; +} + +div.sphinxsidebarwrapper p.logo { + padding: 0; + margin: -10px 0 0 0px; + text-align: center; +} + +div.sphinxsidebarwrapper h1.logo { + margin-top: -10px; + text-align: center; + margin-bottom: 5px; + text-align: left; +} + +div.sphinxsidebarwrapper h1.logo-name { + margin-top: 0px; +} + +div.sphinxsidebarwrapper p.blurb { + margin-top: 0; + font-style: normal; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: Georgia, serif; + color: #444; + font-size: 24px; + font-weight: normal; + margin: 0 0 5px 0; + padding: 0; +} + +div.sphinxsidebar h4 { + font-size: 20px; +} + +div.sphinxsidebar h3 a { + color: #444; +} + +div.sphinxsidebar p.logo a, +div.sphinxsidebar h3 a, +div.sphinxsidebar p.logo a:hover, +div.sphinxsidebar h3 a:hover { + border: none; +} + +div.sphinxsidebar p { + color: #555; + margin: 10px 0; +} + +div.sphinxsidebar ul { + margin: 10px 0; + padding: 0; + color: #000; +} + +div.sphinxsidebar ul li.toctree-l1 > a { + font-size: 120%; +} + +div.sphinxsidebar ul li.toctree-l2 > a { + font-size: 110%; +} + +div.sphinxsidebar input { + border: 1px solid #CCC; + 
font-family: Georgia, serif; + font-size: 1em; +} + +div.sphinxsidebar hr { + border: none; + height: 1px; + color: #AAA; + background: #AAA; + + text-align: left; + margin-left: 0; + width: 50%; +} + +div.sphinxsidebar .badge { + border-bottom: none; +} + +div.sphinxsidebar .badge:hover { + border-bottom: none; +} + +/* To address an issue with donation coming after search */ +div.sphinxsidebar h3.donation { + margin-top: 10px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #004B6B; + text-decoration: underline; +} + +a:hover { + color: #6D4100; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: Georgia, serif; + font-weight: normal; + margin: 30px 0px 10px 0px; + padding: 0; +} + +div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } +div.body h2 { font-size: 180%; } +div.body h3 { font-size: 150%; } +div.body h4 { font-size: 130%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #DDD; + padding: 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + color: #444; + background: #EAEAEA; +} + +div.body p, div.body dd, div.body li { + line-height: 1.4em; +} + +div.admonition { + margin: 20px 0px; + padding: 10px 30px; + background-color: #EEE; + border: 1px solid #CCC; +} + +div.admonition tt.xref, div.admonition code.xref, div.admonition a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fafafa; +} + +div.admonition p.admonition-title { + font-family: Georgia, serif; + font-weight: normal; + font-size: 24px; + margin: 0 0 10px 0; + padding: 0; + line-height: 1; +} + +div.admonition p.last { + margin-bottom: 0; +} + +div.highlight { + background-color: #fff; +} + +dt:target, .highlight { + background: #FAF3E8; +} + +div.warning { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.danger { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.error { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.caution { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.attention { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.important { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.note { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.tip { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.hint { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.seealso { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.topic { + background-color: #EEE; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre, tt, code { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; + font-size: 0.9em; +} + +.hll { + background-color: #FFC; + margin: 0 -12px; + padding: 0 12px; + display: block; +} + +img.screenshot { +} + +tt.descname, tt.descclassname, code.descname, code.descclassname { + font-size: 0.95em; +} + +tt.descname, code.descname { + padding-right: 0.08em; +} + +img.screenshot { + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils { + border: 1px solid #888; + -moz-box-shadow: 2px 
2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils td, table.docutils th { + border: 1px solid #888; + padding: 0.25em 0.7em; +} + +table.field-list, table.footnote { + border: none; + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + +table.footnote { + margin: 15px 0; + width: 100%; + border: 1px solid #EEE; + background: #FDFDFD; + font-size: 0.9em; +} + +table.footnote + table.footnote { + margin-top: -15px; + border-top: none; +} + +table.field-list th { + padding: 0 0.8em 0 0; +} + +table.field-list td { + padding: 0; +} + +table.field-list p { + margin-bottom: 0.8em; +} + +/* Cloned from + * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68 + */ +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +table.footnote td.label { + width: .1px; + padding: 0.3em 0 0.3em 0.5em; +} + +table.footnote td { + padding: 0.3em 0.5em; +} + +dl { + margin-left: 0; + margin-right: 0; + margin-top: 0; + padding: 0; +} + +dl dd { + margin-left: 30px; +} + +blockquote { + margin: 0 0 0 30px; + padding: 0; +} + +ul, ol { + /* Matches the 30px from the narrow-screen "li > ul" selector below */ + margin: 10px 0 10px 30px; + padding: 0; +} + +pre { + background: #EEE; + padding: 7px 30px; + margin: 15px 0px; + line-height: 1.3em; +} + +div.viewcode-block:target { + background: #ffd; +} + +dl pre, blockquote pre, li pre { + margin-left: 0; + padding-left: 30px; +} + +tt, code { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ +} + +tt.xref, code.xref, a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fff; +} + +a.reference { + text-decoration: none; + border-bottom: 1px dotted #004B6B; +} + +/* Don't put an underline on images */ +a.image-reference, a.image-reference:hover { + border-bottom: none; +} + +a.reference:hover { + border-bottom: 1px solid #6D4100; +} + +a.footnote-reference { + text-decoration: none; + font-size: 0.7em; + vertical-align: top; + border-bottom: 1px dotted #004B6B; +} + +a.footnote-reference:hover { + border-bottom: 1px solid #6D4100; +} + +a:hover tt, a:hover code { + background: #EEE; +} + + +@media screen and (max-width: 870px) { + + div.sphinxsidebar { + display: none; + } + + div.document { + width: 100%; + + } + + div.documentwrapper { + margin-left: 0; + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + } + + div.bodywrapper { + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + margin-left: 0; + } + + ul { + margin-left: 0; + } + + li > ul { + /* Matches the 30px from the "ul, ol" selector above */ + margin-left: 30px; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .bodywrapper { + margin: 0; + } + + .footer { + width: auto; + } + + .github { + display: none; + } + + + +} + + + +@media screen and (max-width: 875px) { + + body { + margin: 0; + padding: 20px 30px; + } + + div.documentwrapper { + float: none; + background: #fff; + } + + div.sphinxsidebar { + display: block; + float: none; + width: 102.5%; + margin: 50px -30px -20px -30px; + padding: 10px 20px; + background: #333; + color: #FFF; + } + + div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, + div.sphinxsidebar h3 a { + color: #fff; + } + + div.sphinxsidebar a { + color: #AAA; + } + + div.sphinxsidebar p.logo { + display: none; + } + + div.document { + width: 100%; + margin: 0; + } + + div.footer { + display: none; + } + + div.bodywrapper { + margin: 0; + } 
+ + div.body { + min-height: 0; + padding: 0; + } + + .rtd_doc_footer { + display: none; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .footer { + width: auto; + } + + .github { + display: none; + } +} + + +/* misc. */ + +.revsys-inline { + display: none!important; +} + +/* Make nested-list/multi-paragraph items look better in Releases changelog + * pages. Without this, docutils' magical list fuckery causes inconsistent + * formatting between different release sub-lists. + */ +div#changelog > div.section > ul > li > p:only-child { + margin-bottom: 0; +} + +/* Hide fugly table cell borders in ..bibliography:: directive output */ +table.docutils.citation, table.docutils.citation td, table.docutils.citation th { + border: none; + /* Below needed in some edge cases; if not applied, bottom shadows appear */ + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + + +/* relbar */ + +.related { + line-height: 30px; + width: 100%; + font-size: 0.9rem; +} + +.related.top { + border-bottom: 1px solid #EEE; + margin-bottom: 20px; +} + +.related.bottom { + border-top: 1px solid #EEE; +} + +.related ul { + padding: 0; + margin: 0; + list-style: none; +} + +.related li { + display: inline; +} + +nav#rellinks { + float: right; +} + +nav#rellinks li+li:before { + content: "|"; +} + +nav#breadcrumbs li+li:before { + content: "\00BB"; +} + +/* Hide certain items when printing */ +@media print { + div.related { + display: none; + } +} \ No newline at end of file diff --git a/_static/basic.css b/_static/basic.css new file mode 100644 index 000000000..30fee9d0f --- /dev/null +++ b/_static/basic.css @@ -0,0 +1,925 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + 
+div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 
8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + 
+dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} 
+ +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/_static/custom.css b/_static/custom.css new file mode 100644 index 000000000..2a924f1d6 --- /dev/null +++ b/_static/custom.css @@ -0,0 +1 @@ +/* This file intentionally left blank. */ diff --git a/_static/doctools.js b/_static/doctools.js new file mode 100644 index 000000000..d06a71d75 --- /dev/null +++ b/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? 
singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/_static/documentation_options.js b/_static/documentation_options.js new file mode 100644 index 000000000..7e4c114f2 --- /dev/null +++ b/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + VERSION: '', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/_static/file.png b/_static/file.png new file mode 100644 index 000000000..a858a410e Binary files /dev/null and b/_static/file.png differ diff --git a/_static/language_data.js b/_static/language_data.js new file mode 100644 index 000000000..250f5665f --- /dev/null +++ b/_static/language_data.js @@ -0,0 +1,199 @@ +/* + * language_data.js + * ~~~~~~~~~~~~~~~~ + * + * This script contains the 
language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, is available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" + v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = 
re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/_static/minus.png b/_static/minus.png new file mode 100644 index 000000000..d96755fda Binary files /dev/null and b/_static/minus.png differ diff --git a/_static/plus.png b/_static/plus.png new file mode 100644 index 000000000..7107cec93 Binary files /dev/null and b/_static/plus.png differ diff --git a/_static/pygments.css b/_static/pygments.css new file mode 100644 index 000000000..57c7df37b --- /dev/null +++ b/_static/pygments.css @@ -0,0 +1,84 @@ +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #8f5902; font-style: italic } /* Comment */ +.highlight .err { color: #a40000; border: 1px solid #ef2929 } /* Error */ +.highlight .g { color: #000000 } /* Generic */ +.highlight .k { color: #004461; font-weight: bold } /* Keyword */ +.highlight .l { color: #000000 } /* Literal */ +.highlight .n { color: #000000 } /* Name */ +.highlight .o { color: #582800 } /* Operator */ +.highlight .x { color: #000000 } /* Other */ +.highlight .p { color: #000000; font-weight: bold } /* Punctuation */ +.highlight .ch { color: #8f5902; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #8f5902; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #8f5902 } /* Comment.Preproc */ +.highlight .cpf { color: #8f5902; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #8f5902; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #8f5902; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #a40000 } /* Generic.Deleted */ +.highlight .ge { color: #000000; font-style: italic } /* Generic.Emph */ +.highlight .ges { color: #000000 } /* Generic.EmphStrong */ +.highlight .gr { color: #ef2929 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #888888 } /* Generic.Output */ +.highlight .gp { color: #745334 } /* Generic.Prompt */ +.highlight .gs { color: #000000; font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #a40000; font-weight: bold } /* Generic.Traceback */ +.highlight .kc { color: #004461; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #004461; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #004461; font-weight: bold } /* Keyword.Namespace */ +.highlight 
.kp { color: #004461; font-weight: bold } /* Keyword.Pseudo */ +.highlight .kr { color: #004461; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #004461; font-weight: bold } /* Keyword.Type */ +.highlight .ld { color: #000000 } /* Literal.Date */ +.highlight .m { color: #990000 } /* Literal.Number */ +.highlight .s { color: #4e9a06 } /* Literal.String */ +.highlight .na { color: #c4a000 } /* Name.Attribute */ +.highlight .nb { color: #004461 } /* Name.Builtin */ +.highlight .nc { color: #000000 } /* Name.Class */ +.highlight .no { color: #000000 } /* Name.Constant */ +.highlight .nd { color: #888888 } /* Name.Decorator */ +.highlight .ni { color: #ce5c00 } /* Name.Entity */ +.highlight .ne { color: #cc0000; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #000000 } /* Name.Function */ +.highlight .nl { color: #f57900 } /* Name.Label */ +.highlight .nn { color: #000000 } /* Name.Namespace */ +.highlight .nx { color: #000000 } /* Name.Other */ +.highlight .py { color: #000000 } /* Name.Property */ +.highlight .nt { color: #004461; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #000000 } /* Name.Variable */ +.highlight .ow { color: #004461; font-weight: bold } /* Operator.Word */ +.highlight .pm { color: #000000; font-weight: bold } /* Punctuation.Marker */ +.highlight .w { color: #f8f8f8; text-decoration: underline } /* Text.Whitespace */ +.highlight .mb { color: #990000 } /* Literal.Number.Bin */ +.highlight .mf { color: #990000 } /* Literal.Number.Float */ +.highlight .mh { color: #990000 } /* Literal.Number.Hex */ +.highlight .mi { color: #990000 } /* Literal.Number.Integer */ +.highlight .mo { color: #990000 } /* Literal.Number.Oct */ +.highlight .sa { color: #4e9a06 } /* Literal.String.Affix */ +.highlight .sb { color: #4e9a06 } /* Literal.String.Backtick */ +.highlight .sc { color: #4e9a06 } /* Literal.String.Char */ +.highlight .dl { color: #4e9a06 } /* Literal.String.Delimiter */ +.highlight .sd { color: #8f5902; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4e9a06 } /* Literal.String.Double */ +.highlight .se { color: #4e9a06 } /* Literal.String.Escape */ +.highlight .sh { color: #4e9a06 } /* Literal.String.Heredoc */ +.highlight .si { color: #4e9a06 } /* Literal.String.Interpol */ +.highlight .sx { color: #4e9a06 } /* Literal.String.Other */ +.highlight .sr { color: #4e9a06 } /* Literal.String.Regex */ +.highlight .s1 { color: #4e9a06 } /* Literal.String.Single */ +.highlight .ss { color: #4e9a06 } /* Literal.String.Symbol */ +.highlight .bp { color: #3465a4 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #000000 } /* Name.Function.Magic */ +.highlight .vc { color: #000000 } /* Name.Variable.Class */ +.highlight .vg { color: #000000 } /* Name.Variable.Global */ +.highlight .vi { color: #000000 } /* Name.Variable.Instance */ +.highlight .vm { color: #000000 } /* Name.Variable.Magic */ +.highlight .il { color: #990000 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/_static/searchtools.js b/_static/searchtools.js new file mode 100644 index 000000000..7918c3fab --- /dev/null +++ b/_static/searchtools.js @@ -0,0 +1,574 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. 
+ */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. + objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." 
+ ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. + * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. + */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== undefined) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." + ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = 
document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick! + if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + /** + * execute search (requires search index to be loaded) + */ + query: (query) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + // array of [docname, title, anchor, descr, score, filename] + let results = []; + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + results.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id] of foundEntries) { + let score = Math.round(100 * queryLower.length / entry.length) + results.push([ + docNames[file], + titles[file], + id ? 
"#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // lookup as object + objectTerms.forEach((term) => + results.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); + + // now sort the results by score (in opposite order of appearance, since the + // display function below uses pop() to retrieve items) and then + // alphabetically + results.sort((a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 1 : -1; + }); + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + results = results.reverse(); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. 
last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord) && !terms[word]) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord) && !titleTerms[word]) + arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); + }); + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, {}); + scoreMap.get(file)[word] = record.score; + }); + }); + + // create the mapping + files.forEach((file) => { + if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) + fileMap.get(file).push(word); + else fileMap.set(file, [word]); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + 
// ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords) => { + const text = Search.htmlToText(htmlText); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/_static/sphinx_highlight.js b/_static/sphinx_highlight.js new file mode 100644 index 000000000..8a96c69a1 --- /dev/null +++ b/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. + */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. 
+ */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. 
+ */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/genindex.html b/genindex.html new file mode 100644 index 000000000..9dfc4962e --- /dev/null +++ b/genindex.html @@ -0,0 +1,1401 @@ + + + + + + + Index — Intelligence Layer documentation + + + + + + + + + + + + + + + + +
+
+
+ + +
+ + +

Index

+ +
+ A + | B + | C + | D + | E + | F + | G + | H + | I + | J + | L + | M + | N + | O + | P + | Q + | R + | S + | T + | U + | V + +
+

A

+ + + +
+ +

B

+ + + +
+ +

C

+ + + +
+ +

D

+ + + +
+ +

E

+ + + +
+ +

F

+ + + +
+ +

G

+ + + +
+ +

H

+ + +
+ +

I

+ + + +
    +
  • + intelligence_layer.core.logger + +
  • +
  • + intelligence_layer.core.prompt_template + +
  • +
  • + intelligence_layer.core.task + +
  • +
  • + intelligence_layer.core.text_highlight + +
  • +
  • + intelligence_layer.use_cases + +
  • +
  • + intelligence_layer.use_cases.classify + +
  • +
  • + intelligence_layer.use_cases.classify.classify + +
  • +
  • + intelligence_layer.use_cases.classify.embedding_based_classify + +
  • +
  • + intelligence_layer.use_cases.classify.single_label_classify + +
  • +
  • + intelligence_layer.use_cases.qa + +
  • +
  • + intelligence_layer.use_cases.qa.long_context_qa + +
  • +
  • + intelligence_layer.use_cases.qa.multiple_chunk_qa + +
  • +
  • + intelligence_layer.use_cases.qa.retriever_based_qa + +
  • +
  • + intelligence_layer.use_cases.qa.single_chunk_qa + +
  • +
  • + intelligence_layer.use_cases.search + +
  • +
  • + intelligence_layer.use_cases.search.filter_search + +
  • +
  • + intelligence_layer.use_cases.search.search + +
  • +
  • + intelligence_layer.use_cases.summarize + +
  • +
  • + intelligence_layer.use_cases.summarize.summarize + +
  • +
  • item (intelligence_layer.core.prompt_template.PromptItemCursor attribute) + +
  • +
+ +

J

+ + +
+ +

L

+ + + +
+ +

M

+ + + +
+ +

N

+ + + +
+ +

O

+ + +
+ +

P

+ + + +
+ +

Q

+ + +
+ +

R

+ + + +
+ +

S

+ + + +
+ +

T

+ + + +
+ +

U

+ + +
+ +

V

+ + +
+ + + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 000000000..8ff9c6996 --- /dev/null +++ b/index.html @@ -0,0 +1,122 @@ + + + + + + + + Welcome to Intelligence Layer’s documentation! — Intelligence Layer documentation + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Welcome to Intelligence Layer’s documentation!

+
+

Contents:

+ +
+
+
+

Indices and tables

+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/intelligence_layer.connectors.html b/intelligence_layer.connectors.html new file mode 100644 index 000000000..18777a859 --- /dev/null +++ b/intelligence_layer.connectors.html @@ -0,0 +1,253 @@ + + + + + + + + intelligence_layer.connectors package — Intelligence Layer documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

intelligence_layer.connectors package

+
+

Subpackages

+ +
+
+

Submodules

+
+
+

intelligence_layer.connectors.document_index module

+
+
+class intelligence_layer.connectors.document_index.DocumentIndex(token: str, base_document_index_url: str = 'https://knowledge.aleph-alpha.com')[source]
+

Bases: object

+

Client for the Document Index, allowing you to manage documents and search through them.

+

Document Index is a tool for managing collections of documents, enabling operations such as creation, deletion, listing, and searching. +Documents can be stored either in the cloud or in a local deployment.

+
+
Parameters:
+
    +
  • token – A valid token for the document index API.

  • +
  • base_document_index_url – The URL of the Document Index API.

  • +
+
+
+

Example

+
>>> document_index = DocumentIndex(os.getenv("AA_TOKEN"))
+>>> document_index.create_collection(namespace="my_namespace", collection="germany_facts_collection")
+>>> document_index.add_document(
+>>>     namespace="my_namespace",
+>>>     collection="germany_facts_collection",
+>>>     name="Fun facts about Germany",
+>>>     content="Germany is a country located in ..."
+>>> )
+>>> documents = document_index.search(
+>>>     namespace="my_namespace",
+>>>     collection="germany_facts_collection",
+>>>     query="What is the capital of Germany",
+>>>     max_results=4,
+>>>     min_score=0.5
+>>> )
+
+
+
+
+add_document(namespace: str, collection: str, name: str, content: str) None[source]
+
+ +
+
+create_collection(namespace: str, collection: str) None[source]
+
+ +
+
+delete_collection(namespace: str, collection: str) None[source]
+
+ +
+
+delete_document(namespace: str, collection: str, name: str) None[source]
+
+ +
+
+get_document(namespace: str, collection: str, name: str, get_chunks: bool = False) Any[source]
+
+ +
+
+list_documents(namespace: str, collection: str) Any[source]
+
+ +
+
+search(namespace: str, collection: str, query: str, max_results: int, min_score: float) Any[source]
+
+ +
+ +
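+
+A minimal sketch of fetching, listing, and deleting documents with the methods above, assuming the collection from the class example exists:
+
+>>> document = document_index.get_document(
+>>>     namespace="my_namespace",
+>>>     collection="germany_facts_collection",
+>>>     name="Fun facts about Germany"
+>>> )
+>>> documents = document_index.list_documents(
+>>>     namespace="my_namespace",
+>>>     collection="germany_facts_collection"
+>>> )
+>>> document_index.delete_document(
+>>>     namespace="my_namespace",
+>>>     collection="germany_facts_collection",
+>>>     name="Fun facts about Germany"
+>>> )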
+
+

Module contents

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/intelligence_layer.connectors.retrievers.html b/intelligence_layer.connectors.retrievers.html new file mode 100644 index 000000000..fe8fb6b71 --- /dev/null +++ b/intelligence_layer.connectors.retrievers.html @@ -0,0 +1,360 @@ + + + + + + + + intelligence_layer.connectors.retrievers package — Intelligence Layer documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

intelligence_layer.connectors.retrievers package

+
+

Submodules

+
+
+

intelligence_layer.connectors.retrievers.base_retriever module

+
+
+class intelligence_layer.connectors.retrievers.base_retriever.BaseRetriever[source]
+

Bases: ABC

+

General interface for any retriever.

+

Retrievers are used to find texts given a user query. +Each Retriever implementation owns its own logic for retrieval. +For comparison purposes, we assume scores in the `SearchResult`s to be between 0 and 1.

+
+
+abstract get_relevant_documents_with_scores(query: str) Sequence[SearchResult][source]
+
+ +
+ +
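+
+A minimal sketch of a custom retriever satisfying this interface; `KeywordRetriever` is a hypothetical name and the substring match is only illustrative, real implementations would score semantically:
+
+>>> class KeywordRetriever(BaseRetriever):
+>>>     def __init__(self, documents: Sequence[Document]) -> None:
+>>>         self.documents = documents
+>>>     def get_relevant_documents_with_scores(self, query: str) -> Sequence[SearchResult]:
+>>>         # keep scores within the assumed range of 0 to 1
+>>>         return [
+>>>             SearchResult(score=1.0, document=d)
+>>>             for d in self.documents
+>>>             if query.lower() in d.text.lower()
+>>>         ]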
+
+class intelligence_layer.connectors.retrievers.base_retriever.Document(*, text: str, metadata: Any = None)[source]
+

Bases: BaseModel

+

Document abstraction, specifically for retrieval use cases.

+
+
+text
+

The document’s text.

+
+
Type:
+

str

+
+
+
+ +
+
+metadata
+

Any json-serializable object.

+
+
Type:
+

Any

+
+
+
+ +
+
+metadata: Any
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'metadata': FieldInfo(annotation=Any, required=False), 'text': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+text: str
+
+ +
+ +
+
+class intelligence_layer.connectors.retrievers.base_retriever.SearchResult(*, score: float, document: Document)[source]
+

Bases: BaseModel

+

Contains a text alongside its search score.

+
+
+score
+

The similarity score between the text and the query that was searched with. +Will be between 0 and 1, where 0 means no similarity and 1 perfect similarity.

+
+
Type:
+

float

+
+
+
+ +
+
+document
+

The document found by search.

+
+
Type:
+

intelligence_layer.connectors.retrievers.base_retriever.Document

+
+
+
+ +
+
+document: Document
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'document': FieldInfo(annotation=Document, required=True), 'score': FieldInfo(annotation=float, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+score: float
+
+ +
+ +
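+
+A minimal sketch of constructing these models directly; the metadata payload is an arbitrary illustrative value:
+
+>>> document = Document(text="Germany is a country located in ...", metadata={"source": "handbook"})
+>>> result = SearchResult(score=0.87, document=document)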
+
+

intelligence_layer.connectors.retrievers.document_index_retriever module

+
+
+class intelligence_layer.connectors.retrievers.document_index_retriever.DocumentIndexRetriever(document_index: DocumentIndex, namespace: str, collection: str, k: int, threshold: float = 0.5)[source]
+

Bases: BaseRetriever

+

Search through documents within collections in the DocumentIndex.

+

We initialize this Retriever with a namespace and collection name, and can then find the documents in the collection +most semantically similar to our query.

+
+
Parameters:
+
    +
  • document_index – Client offering functionality for search.

  • +
  • namespace – The namespace within the DocumentIndex where all collections are stored.

  • +
  • collection – The collection within the namespace that holds the desired documents.

  • +
  • k – The (top) number of documents to be returned by search.

  • +
  • threshold – The minimum value of cosine similarity between the query vector and the document vector.

  • +
+
+
+

Example

+
>>> document_index = DocumentIndex(os.getenv("AA_TOKEN"))
+>>> retriever = DocumentIndexRetriever(document_index, "my_namespace", "airplane_facts_collection", 3)
+>>> query = "Who invented the airplane?"
+>>> documents = retriever.get_relevant_documents_with_scores(query)
+
+
+
+
+get_relevant_documents_with_scores(query: str) Sequence[SearchResult][source]
+
+ +
+ +
+
+

intelligence_layer.connectors.retrievers.in_memory_retriever module

+
+
+class intelligence_layer.connectors.retrievers.in_memory_retriever.InMemoryRetriever(client: Client, documents: Sequence[Document], k: int, threshold: float = 0.5, retriever_type: RetrieverType = RetrieverType.ASYMMETRIC)[source]
+

Bases: BaseRetriever

+

Search through documents stored in memory using semantic search.

+

This retriever uses an in-memory [Qdrant](https://github.com/qdrant/qdrant) vector store instance to store documents and their asymmetric embeddings. +When run, the given query is embedded and scored against the document embeddings to retrieve the k most similar matches by cosine similarity.

+
+
Parameters:
+
    +
  • client – Aleph Alpha client instance for running model related API calls.

  • +
  • documents – The sequence of documents to be made searchable.

  • +
  • k – The (top) number of documents to be returned by search.

  • +
  • threshold – The minimum value of cosine similarity between the query vector and the document vector.

  • +
  • retriever_type – The type of retriever to be instantiated. +Should be ASYMMETRIC for most query-document retrieval use cases; SYMMETRIC is optimized +for similar document retrieval.

  • +
+
+
+

Example

+
>>> client = Client(os.getenv("AA_TOKEN"))
+>>> documents = [Document(text=t) for t in ["I do not like rain.", "Summer is warm.", "We are so back."]]
+>>> retriever = InMemoryRetriever(client, documents)
+>>> query = "Do you like summer?"
+>>> documents = retriever.get_relevant_documents_with_scores(query)
+
+
+
+
+MAX_WORKERS = 10
+
+ +
+
+get_filtered_documents_with_scores(query: str, filter: Filter) Sequence[SearchResult][source]
+

Specific method for InMemoryRetriever to support filtering search results.

+
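+
+A minimal sketch of a filtered search, assuming `retriever` was built as in the example above, that stored documents carry a metadata field named "category" (an assumption for illustration), and that Filter and its condition types come from the qdrant-client package:
+
+>>> from qdrant_client.http.models import FieldCondition, Filter, MatchValue
+>>> filter = Filter(
+>>>     must=[FieldCondition(key="metadata.category", match=MatchValue(value="weather"))]
+>>> )
+>>> results = retriever.get_filtered_documents_with_scores("Is it rainy?", filter)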
+ +
+
+get_relevant_documents_with_scores(query: str) Sequence[SearchResult][source]
+
+ +
+ +
+
+class intelligence_layer.connectors.retrievers.in_memory_retriever.RetrieverType(value)[source]
+

Bases: Enum

+

Specify the type of retriever to instantiate.

+
+
+ASYMMETRIC
+

Query is embedded as Query and each document as Document.

+
+ +
+
+SYMMETRIC
+

Both query and documents will be embedded as Symmetric.

+
+ +
+
+ASYMMETRIC = (SemanticRepresentation.Query, SemanticRepresentation.Document)
+
+ +
+
+SYMMETRIC = (SemanticRepresentation.Symmetric, SemanticRepresentation.Symmetric)
+
+ +
+ +
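+
+A minimal sketch of selecting the symmetric variant, assuming `client` and `documents` are set up as in the InMemoryRetriever example above; SYMMETRIC suits queries that are themselves document-like:
+
+>>> retriever = InMemoryRetriever(
+>>>     client,
+>>>     documents,
+>>>     k=5,
+>>>     retriever_type=RetrieverType.SYMMETRIC,
+>>> )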
+
+

Module contents

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/intelligence_layer.core.html b/intelligence_layer.core.html new file mode 100644 index 000000000..f2306f243 --- /dev/null +++ b/intelligence_layer.core.html @@ -0,0 +1,2536 @@ + + + + + + + + intelligence_layer.core package — Intelligence Layer documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

intelligence_layer.core package

+
+

Submodules

+
+
+

intelligence_layer.core.complete module

+
+
+class intelligence_layer.core.complete.Complete(client: Client)[source]
+

Bases: Task[CompleteInput, CompleteOutput]

+

Performs a completion request with access to all possible request parameters.

+

Only use this task if none of the higher-level tasks defined below works for +you, e.g. because your completion request does not fit the use cases the higher-level ones represent, or +because you need to control request parameters that are not exposed by them.

+
+
Parameters:
+

client – Aleph Alpha client instance for running model related API calls.

+
+
+
+
+run(input: CompleteInput, logger: DebugLogger) CompleteOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
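+
+A minimal sketch of a raw completion request; CompletionRequest and Prompt are the aleph_alpha_client types referenced above, and the model name is only an example:
+
+>>> client = Client(os.getenv("AA_TOKEN"))
+>>> task = Complete(client)
+>>> request = CompletionRequest(prompt=Prompt.from_text("An apple a day"), maximum_tokens=16)
+>>> input = CompleteInput(request=request, model="luminous-base")
+>>> logger = InMemoryDebugLogger(name="Complete")
+>>> output = task.run(input, logger)
+>>> print(output.completion)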
+
+class intelligence_layer.core.complete.CompleteInput(*, request: CompletionRequest, model: str)[source]
+

Bases: BaseModel

+

The input for a Complete task.

+
+
+request
+

Aleph Alpha Client’s CompletionRequest. This gives fine-grained control +over all completion parameters that are supported by Aleph Alpha’s inference API.

+
+
Type:
+

aleph_alpha_client.completion.CompletionRequest

+
+
+
+ +
+
+model
+

A valid Aleph Alpha model name.

+
+
Type:
+

str

+
+
+
+ +
+
+model: str
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'model': FieldInfo(annotation=str, required=True), 'request': FieldInfo(annotation=CompletionRequest, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+request: CompletionRequest
+
+ +
+ +
+
+class intelligence_layer.core.complete.CompleteOutput(*, response: CompletionResponse)[source]
+

Bases: BaseModel

+

The output of a Complete task.

+
+
+response
+

Aleph Alpha Client’s CompletionResponse containing all details +provided by Aleph Alpha’s inference API.

+
+
Type:
+

aleph_alpha_client.completion.CompletionResponse

+
+
+
+ +
+
+property completion: str
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'response': FieldInfo(annotation=CompletionResponse, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+response: CompletionResponse
+
+ +
+ +
+
+class intelligence_layer.core.complete.Instruct(client: Client)[source]
+

Bases: Task[InstructInput, InstructOutput]

+

Runs zero-shot instruction completions on a model.

+

Can be used for various types of instructions an LLM could handle, like QA, summarization, +translation and more.

+
+
Parameters:
+

client – Aleph Alpha client instance for running model related API calls.

+
+
+
+
+INSTRUCTION_PROMPT_TEMPLATE
+

The prompt-template used to build the actual Prompt sent +to the inference API.

+
+ +

Example

+
>>> client = Client(os.getenv("AA_TOKEN"))
+>>> task = Instruct(client)
+>>> input = InstructInput(
+>>>     instruction="Translate the following text to German.",
+>>>     input="An apple a day keeps the doctor away.",
+>>>     model="luminous-extended-control"
+>>> )
+>>> logger = InMemoryDebugLogger(name="Instruct")
+>>> output = task.run(input, logger)
+>>> print(output.response)
+Ein Apfel am Tag hält den Arzt fern.
+
+
+
+
+INSTRUCTION_PROMPT_TEMPLATE = '### Instruction:\n{% promptrange instruction %}{{instruction}}{% endpromptrange %}\n{% if input %}\n### Input:\n{% promptrange input %}{{input}}{% endpromptrange %}\n{% endif %}\n### Response:{{response_prefix}}'
+
+ +
+
+run(input: InstructInput, logger: DebugLogger) InstructOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
+
+class intelligence_layer.core.complete.InstructInput(*, instruction: str, input: str | None, model: str, response_prefix: str = '', maximum_response_tokens: int = 64)[source]
+

Bases: BaseModel

+

The input for an Instruct.

+
+
+instruction
+

A textual instruction for the model. +Could be a directive to answer a question or to translate something.

+
+
Type:
+

str

+
+
+
+ +
+
+input
+

The text input for the instruction, e.g. a text to be translated.

+
+
Type:
+

str | None

+
+
+
+ +
+
+model
+

The name of the model that should handle the instruction. +Certain models are optimized for handling such instruction tasks. +Typically their name contains ‘control’, e.g. ‘luminous-extended-control’.

+
+
Type:
+

str

+
+
+
+ +
+
+response_prefix
+

A string that is provided to the LLM as a prefix of the response. +This can steer the model completion.

+
+
Type:
+

str

+
+
+
+ +
+
+maximum_response_tokens
+

The maximum number of tokens to be generated in the answer. +The default corresponds to roughly one short paragraph.

+
+
Type:
+

int

+
+
+
+ +
+
+input: str | None
+
+ +
+
+instruction: str
+
+ +
+
+maximum_response_tokens: int
+
+ +
+
+model: str
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'input': FieldInfo(annotation=Union[str, NoneType], required=True), 'instruction': FieldInfo(annotation=str, required=True), 'maximum_response_tokens': FieldInfo(annotation=int, required=False, default=64), 'model': FieldInfo(annotation=str, required=True), 'response_prefix': FieldInfo(annotation=str, required=False, default='')}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+response_prefix: str
+
+ +
+ +
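+
+A minimal sketch showing how response_prefix can steer the completion; the model name follows the 'control' convention mentioned above:
+
+>>> input = InstructInput(
+>>>     instruction="Answer the question briefly.",
+>>>     input="What is the capital of Germany?",
+>>>     model="luminous-extended-control",
+>>>     response_prefix=" The capital is",
+>>> )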
+
+class intelligence_layer.core.complete.InstructOutput(*, response: str, prompt_with_metadata: PromptWithMetadata)[source]
+

Bases: BaseModel

+

The output of an Instruct.

+
+
+response
+

The generated response to the instruction.

+
+
Type:
+

str

+
+
+
+ +
+
+prompt_with_metadata
+

To handle the instruction, a PromptTemplate is used. +The template defines two `PromptRange`s: +- “instruction”: covering the instruction text as provided in the InstructInput. +- “input”: covering the input text as provided in the InstructInput. +These can, for example, be used for downstream TextHighlight tasks.

+
+
Type:
+

intelligence_layer.core.prompt_template.PromptWithMetadata

+
+
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'prompt_with_metadata': FieldInfo(annotation=PromptWithMetadata, required=True), 'response': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+prompt_with_metadata: PromptWithMetadata
+
+ +
+
+response: str
+
+ +
+ +
+
+

intelligence_layer.core.echo module

+
+
+class intelligence_layer.core.echo.EchoInput(*, prompt: Prompt, expected_completion: str, model: str)[source]
+

Bases: BaseModel

+

The input for an EchoTask.

+
+
+prompt
+

The input text that serves as the starting point for the LLM.

+
+
Type:
+

aleph_alpha_client.prompt.Prompt

+
+
+
+ +
+
+expected_completion
+

The desired completion based on the prompt. +The likelihood of the tokens in this completion will be examined.

+
+
Type:
+

str

+
+
+
+ +
+
+model
+

A valid Aleph Alpha model name.

+
+
Type:
+

str

+
+
+
+ +
+
+expected_completion: str
+
+ +
+
+model: str
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'expected_completion': FieldInfo(annotation=str, required=True), 'model': FieldInfo(annotation=str, required=True), 'prompt': FieldInfo(annotation=Prompt, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+prompt: Prompt
+
+ +
+ +
+
+class intelligence_layer.core.echo.EchoOutput(*, tokens_with_log_probs: Sequence[TokenWithProb])[source]
+

Bases: BaseModel

+

The output of an EchoTask.

+
+
+tokens_with_log_probs
+

Every token of the expected_completion of the +EchoInput accompanied by its probability of having been generated +in a completion scenario.

+
+
Type:
+

Sequence[intelligence_layer.core.echo.TokenWithProb]

+
+
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'tokens_with_log_probs': FieldInfo(annotation=Sequence[TokenWithProb], required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+tokens_with_log_probs: Sequence[TokenWithProb]
+
+ +
+ +
+
+class intelligence_layer.core.echo.EchoTask(client: Client)[source]
+

Bases: Task[EchoInput, EchoOutput]

+

Task that returns probabilities of a completion given a prompt.

+

Analyzes the likelihood of generating tokens in the expected completion based on +a given prompt and model. Does not generate any tokens.

+
+
Parameters:
+

client – Aleph Alpha client instance for running model related API calls.

+
+
+

Example

+
>>> client = Client(os.getenv("AA_TOKEN"))
+>>> task = EchoTask(client)
+>>> input = EchoInput(
+        prompt=Prompt.from_text("This is a "),
+        expected_completion="happy text",
+        model="luminous-base",
+    )
+>>> logger = InMemoryDebugLogger(name="EchoTask")
+>>> output = task.run(input, logger)
+>>> print(output.tokens_with_log_probs[0].prob)
+0.6
+
+
+
+
+PROMPT_TEMPLATE: PromptTemplate = <intelligence_layer.core.prompt_template.PromptTemplate object>
+
+ +
+
+run(input: EchoInput, logger: DebugLogger) EchoOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
+
+class intelligence_layer.core.echo.TokenWithProb(*, token: Token, prob: Probability | LogProb)[source]
+

Bases: BaseModel

+
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'prob': FieldInfo(annotation=Union[NewType, NewType], required=True), 'token': FieldInfo(annotation=Token, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+prob: Probability | LogProb
+
+ +
+
+token: Token
+
+ +
+ +
+
+

intelligence_layer.core.evaluator module

+
+
+class intelligence_layer.core.evaluator.Dataset(*, name: str, examples: Sequence[Example])[source]
+

Bases: BaseModel, Generic[Input, ExpectedOutput]

+

A dataset of examples used for evaluation of a task.

+
+
+name
+

This is a human-readable identifier for a dataset.

+
+
Type:
+

str

+
+
+
+ +
+
+examples
+

The actual examples that a task will be evaluated on.

+
+
Type:
+

Sequence[intelligence_layer.core.evaluator.Example]

+
+
+
+ +
+
+examples: Sequence[Example]
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'examples': FieldInfo(annotation=Sequence[Example], required=True), 'name': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+name: str
+
+ +
+ +
+
+class intelligence_layer.core.evaluator.Evaluator[source]
+

Bases: ABC, Generic[Input, ExpectedOutput, Evaluation, AggregatedEvaluation]

+

Base evaluator interface. Implementations run the evaluation steps for a specific task.

+
+
Generics:

Input: Interface to be passed to the task that shall be evaluated. +ExpectedOutput: Output that is expected from the task run with the supplied input. +Evaluation: Interface of the metrics that come from the evaluated task. +AggregatedEvaluation: The aggregated results of an evaluation run with a dataset.

+
+
+

We suggest supplying a Task in the __init__ method and running it in the evaluate method.

+
+
+abstract aggregate(evaluations: Sequence[Evaluation]) AggregatedEvaluation[source]
+

Evaluator-specific method for aggregating individual Evaluations into a report-like AggregatedEvaluation.

+
+ +
+
+abstract evaluate(input: Input, logger: DebugLogger, expected_output: ExpectedOutput) Evaluation[source]
+

Executes the evaluation for this use-case.

+
+ +
+
+evaluate_dataset(dataset: Dataset, logger: DebugLogger) AggregatedEvaluation[source]
+

Evaluates an entire dataset in a threaded manner and aggregates the results into an AggregatedEvaluation.

+
+ +
+ +
+
+class intelligence_layer.core.evaluator.Example(*, input: Input, expected_output: ExpectedOutput, ident: str | None = None)[source]
+

Bases: BaseModel, Generic[Input, ExpectedOutput]

+

Example case used for evaluations.

+
+
+input
+

Input for the task. Has to be the same type as the input for the task used.

+
+
Type:
+

intelligence_layer.core.task.Input

+
+
+
+ +
+
+expected_output
+

The expected output from a given example run. +This will be used by the evaluator to compare against the received output.

+
+
Type:
+

intelligence_layer.core.evaluator.ExpectedOutput

+
+
+
+ +
+
+ident
+

Identifier for the example, defaults to uuid.

+
+
Type:
+

str | None

+
+
+
+ +
+
+expected_output: ExpectedOutput
+
+ +
+
+ident: str | None
+
+ +
+
+input: Input
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'expected_output': FieldInfo(annotation=~ExpectedOutput, required=True), 'ident': FieldInfo(annotation=Union[str, NoneType], required=False, default_factory=<lambda>), 'input': FieldInfo(annotation=~Input, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+ +
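+
+A minimal sketch of assembling a dataset and running it through an evaluator, assuming `evaluator` is a concrete Evaluator implementation for a task that maps a string input to a string output:
+
+>>> dataset = Dataset(
+>>>     name="german translations",
+>>>     examples=[Example(input="An apple a day.", expected_output="Ein Apfel am Tag.")],
+>>> )
+>>> logger = InMemoryDebugLogger(name="evaluation")
+>>> aggregated = evaluator.evaluate_dataset(dataset, logger)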
+
+

intelligence_layer.core.explain module

+
+
+class intelligence_layer.core.explain.Explain(client: Client)[source]
+

Bases: Task[ExplainInput, ExplainOutput]

+

Performs an explanation request with access to all possible request parameters.

+

Only use this task if none of the higher-level tasks defined below works for +you, for example if the TextHighlight task does not fit your use case.

+
+
Parameters:
+

client – Aleph Alpha client instance for running model related API calls.

+
+
+
+
+run(input: ExplainInput, logger: DebugLogger) ExplainOutput[source]
+

Executes the process for this use-case.

+
+ +
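+
+A minimal sketch of an explanation request; ExplanationRequest and Prompt are the aleph_alpha_client types referenced above, and the model name is only an example:
+
+>>> client = Client(os.getenv("AA_TOKEN"))
+>>> task = Explain(client)
+>>> request = ExplanationRequest(
+>>>     prompt=Prompt.from_text("An apple a day"),
+>>>     target=" keeps the doctor away",
+>>> )
+>>> input = ExplainInput(request=request, model="luminous-base")
+>>> logger = InMemoryDebugLogger(name="Explain")
+>>> output = task.run(input, logger)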
+ +
+
+class intelligence_layer.core.explain.ExplainInput(*, request: ExplanationRequest, model: str)[source]
+

Bases: BaseModel

+

The input for an Explain task.

+
+
+request
+

Aleph Alpha Client’s ExplanationRequest. This gives fine-grained control +over all explanation parameters that are supported by Aleph Alpha’s inference API.

+
+
Type:
+

aleph_alpha_client.explanation.ExplanationRequest

+
+
+
+ +
+
+model
+

A valid Aleph Alpha model name.

+
+
Type:
+

str

+
+
+
+ +
+
+model: str
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'model': FieldInfo(annotation=str, required=True), 'request': FieldInfo(annotation=ExplanationRequest, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+request: ExplanationRequest
+
+ +
+ +
+
+class intelligence_layer.core.explain.ExplainOutput(*, response: ExplanationResponse)[source]
+

Bases: BaseModel

+

The output of an Explain task.

+
+
+response
+

Aleph Alpha Client’s ExplanationResponse containing all details +provided by Aleph Alpha’s inference API.

+
+
Type:
+

aleph_alpha_client.explanation.ExplanationResponse

+
+
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'response': FieldInfo(annotation=ExplanationResponse, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+response: ExplanationResponse
+
+ +
+ +
+
+

intelligence_layer.core.logger module

+
+
+class intelligence_layer.core.logger.DebugLogger(*args, **kwargs)[source]
+

Bases: Protocol

+

A protocol for instrumenting `Task`s with structured logging.

+

A logger needs to provide a way to collect an individual log, which should be serializable, and +a way to generate nested loggers, so that sub-tasks can emit logs that are grouped together.

+

Each DebugLogger is given a name to distinguish them from each other, and for nested logs.

+

Implementations of how logs are collected and stored may differ. Refer to the individual +documentation of each implementation to see how to use the resulting logger.

+
+
+log(message: str, value: PydanticSerializable) None[source]
+

Record a log of relevant information as part of a step within a task.

+

By default, the Input and Output of each Task are logged automatically, but you can +log anything else that seems relevant to understanding the output of a given task.

+
+
Parameters:
+
    +
  • message – A description of the value you are logging, such as the step in the task this +is related to.

  • +
  • value – The relevant data you want to log. Can be anything that is serializable by +Pydantic, which gives the loggers flexibility in how they store and emit the logs.

  • +
+
+
+
+ +
+
+span(name: str) Span[source]
+

Generate a span from the current logging instance.

+

Each logger implementation can decide on how it wants to represent this, but they should +all allow for representing logs of a child task within the scope of the current task.

+
+
Parameters:
+

name – A descriptive name of what this span will contain logs about.

+
+
Returns:
+

An instance of something that meets the protocol of Span.

+
+
+
+ +
+
+task_span(task_name: str, input: PydanticSerializable) TaskSpan[source]
+

Generate a task-specific span from the current logging instance.

+

Each logger implementation can decide on how it wants to represent this, but they should +all allow for representing logs of a span within the context of a parent span.

+
+
Parameters:
+
    +
  • task_name – The name of the task that is being logged

  • +
  • input – The input for the task that is being logged.

  • +
+
+
Returns:
+

An instance of something that also meets the protocol of DebugLogger. Most likely, it +will create an instance of the same type, but this is dependent on the actual +implementation.

+
+
+
+ +
+ +
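+
+A minimal sketch of the protocol in use, here with the InMemoryDebugLogger implementation documented below:
+
+>>> logger = InMemoryDebugLogger(name="qa pipeline")
+>>> logger.log("retrieved chunks", 3)
+>>> span = logger.span("ranking")
+>>> span.log("candidates", ["chunk-1", "chunk-2"])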
+
+class intelligence_layer.core.logger.EndSpan(*, uuid: UUID, end: datetime)[source]
+

Bases: BaseModel

+

Represents the payload/entry of a log-line that indicates that a Span ended.

+
+
+uuid
+

The uuid of the corresponding StartSpan.

+
+
Type:
+

uuid.UUID

+
+
+
+ +
+
+end
+

the timestamp when this Span completed.

+
+
Type:
+

datetime.datetime

+
+
+
+ +
+
+end: datetime
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'end': FieldInfo(annotation=datetime, required=True), 'uuid': FieldInfo(annotation=UUID, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+uuid: UUID
+
+ +
+ +
+
+class intelligence_layer.core.logger.EndTask(*, uuid: UUID, end: datetime, output: Any)[source]
+

Bases: BaseModel

+

Represents the payload/entry of a log-line that indicates that a TaskSpan ended (i.e. the context-manager exited).

+
+
+uuid
+

The uuid of the corresponding StartTask.

+
+
Type:
+

uuid.UUID

+
+
+
+ +
+
+end
+

the timestamp when this Task completed (i.e. run returned).

+
+
Type:
+

datetime.datetime

+
+
+
+ +
+
+output
+

the Output (i.e. return value of run) the Task returned.

+
+
Type:
+

Any

+
+
+
+ +
+
+end: datetime
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'end': FieldInfo(annotation=datetime, required=True), 'output': FieldInfo(annotation=Any, required=True, metadata=[SerializeAsAny()]), 'uuid': FieldInfo(annotation=UUID, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+output: Any
+
+ +
+
+uuid: UUID
+
+ +
+ +
+
+class intelligence_layer.core.logger.FileDebugLogger(log_file_path: Path)[source]
+

Bases: DebugLogger

+

A DebugLogger that logs to a file.

+

Each log entry is represented by a JSON object. The information logged allows +reconstructing the hierarchical nature of the logs, i.e. all entries have a +_pointer_ to their parent element in the form of a parent attribute containing +the uuid of the parent.

+
+
Parameters:
+

log_file_path – Denotes the file to log to.

+
+
+
+
+uuid
+

A uuid for the logger. If multiple `FileDebugLogger`s log to the same file, +the child elements of a logger can be identified by referring to this id as parent.

+
+ +
+
+log(message: str, value: PydanticSerializable) None[source]
+

Record a log of relevant information as part of a step within a task.

+

By default, the Input and Output of each Task are logged automatically, but you can +log anything else that seems relevant to understanding the output of a given task.

+
+
Parameters:
+
    +
  • message – A description of the value you are logging, such as the step in the task this +is related to.

  • +
  • value – The relevant data you want to log. Can be anything that is serializable by +Pydantic, which gives the loggers flexibility in how they store and emit the logs.

  • +
+
+
+
+ +
+
+span(name: str) FileSpan[source]
+

Generate a span from the current logging instance.

+

Each logger implementation can decide on how it wants to represent this, but they should +all allow for representing logs of a child task within the scope of the current task.

+
+
Parameters:
+

name – A descriptive name of what this span will contain logs about.

+
+
Returns:
+

An instance of something that meets the protocol of Span.

+
+
+
+ +
+
+task_span(task_name: str, input: PydanticSerializable) FileTaskSpan[source]
+

Generate a task-specific span from the current logging instance.

+

Each logger implementation can decide on how it wants to represent this, but they should +all allow for representing logs of a span within the context of a parent span.

+
+
Parameters:
+
    +
  • task_name – The name of the task that is being logged

  • +
  • input – The input for the task that is being logged.

  • +
+
+
Returns:
+

An instance of something that also meets the protocol of DebugLogger. Most likely, it +will create an instance of the same type, but this is dependent on the actual +implementation.

+
+
+
+ +
+ +
+
+class intelligence_layer.core.logger.FileSpan(log_file_path: Path, name: str)[source]
+

Bases: FileDebugLogger, AbstractContextManager[FileSpan]

+

A Span created by FileDebugLogger.span.

+
+ +
+
+class intelligence_layer.core.logger.FileTaskSpan(log_file_path: Path, task_name: str, input: PydanticSerializable)[source]
+

Bases: FileSpan, AbstractContextManager[FileTaskSpan]

+

A TaskSpan created by FileDebugLogger.task_span.

+
+
+output: PydanticSerializable | None = None
+
+ +
+
+record_output(output: PydanticSerializable) None[source]
+
+ +
+ +
+
+class intelligence_layer.core.logger.InMemoryDebugLogger(**data: Any)[source]
+

Bases: BaseModel

+

Collects log entries in a nested structure, and keeps them in memory.

+

If desired, the structure is serializable with Pydantic, so you can write out the JSON +representation to a file, or return via an API, or something similar.

+
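For instance, the collected tree of logs can be serialized in one call (a sketch; the run name is illustrative):

>>> logger = InMemoryDebugLogger(name="my-run")
>>> logger.log("step", {"value": 1})
>>> json_str = logger.model_dump_json()  # Pydantic v2 serialization of the whole log tree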
+
+name
+

A descriptive name of what the logger contains log entries about.

+
+
Type:
+

str

+
+
+
+ +
+
+logs
+

A sequential list of log entries and/or nested InMemoryDebugLoggers with their own +log entries.

+
+
Type:
+

list[intelligence_layer.core.logger.LogEntry | intelligence_layer.core.logger.InMemorySpan | intelligence_layer.core.logger.InMemoryTaskSpan]

+
+
+
+ +
+
+log(message: str, value: PydanticSerializable) None[source]
+

Record a log of relevant information as part of a step within a task.

+

By default, the Input and Output of each Task are logged automatically, but you can +log anything else that seems relevant to understanding the output of a given task.

+
+
Parameters:
+
    +
  • message – A description of the value you are logging, such as the step in the task this +is related to.

  • +
  • value – The relevant data you want to log. Can be anything that is serializable by +Pydantic, which gives the loggers flexibility in how they store and emit the logs.

  • +
+
+
+
+ +
+
+logs: list[LogEntry | InMemorySpan | InMemoryTaskSpan]
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'logs': FieldInfo(annotation=list[Union[LogEntry, InMemorySpan, InMemoryTaskSpan]], required=False, default=[]), 'name': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+name: str
+
+ +
+
+span(name: str) InMemorySpan[source]
+

Generate a sub-logger from the current logging instance.

+
+
Parameters:
+

name – A descriptive name of what this child logger will contain logs about.

+
+
Returns:
+

A nested InMemoryDebugLogger that is stored in a nested position as part of the parent +logger.

+
+
+
+ +
+
+task_span(task_name: str, input: PydanticSerializable) InMemoryTaskSpan[source]
+

Generate a task-specific span from the current logging instance.

+
+
Parameters:
+
    +
  • task_name – The name of the task that is being logged

  • +
  • input – The input for the task that is being logged.

  • +
+
+
Returns:
+

+
A nested InMemoryTaskSpan that is stored in a nested position as part of the parent logger.

+
+
+

+
+
+
+ +
+ +
+
+class intelligence_layer.core.logger.InMemorySpan(*, name: str, logs: list[LogEntry | InMemorySpan | InMemoryTaskSpan] = [])[source]
+

Bases: AbstractContextManager[InMemorySpan], InMemoryDebugLogger

+
+
+end_timestamp: datetime | None
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'end_timestamp': FieldInfo(annotation=Union[datetime, NoneType], required=False), 'logs': FieldInfo(annotation=list[Union[LogEntry, InMemorySpan, InMemoryTaskSpan]], required=False, default=[]), 'name': FieldInfo(annotation=str, required=True), 'start_timestamp': FieldInfo(annotation=Union[datetime, NoneType], required=False, default_factory=builtin_function_or_method)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+start_timestamp: datetime | None
+
+ +
+ +
+
+class intelligence_layer.core.logger.InMemoryTaskSpan(*, name: str, logs: list[LogEntry | InMemorySpan | InMemoryTaskSpan] = [], start_timestamp: datetime | None = None, end_timestamp: datetime | None = None, input: PydanticSerializable, output: PydanticSerializable | None = None)[source]
+

Bases: InMemorySpan

+
+
+input: PydanticSerializable
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'end_timestamp': FieldInfo(annotation=Union[datetime, NoneType], required=False), 'input': FieldInfo(annotation=TypeAliasType, required=True, metadata=[SerializeAsAny()]), 'logs': FieldInfo(annotation=list[Union[LogEntry, InMemorySpan, InMemoryTaskSpan]], required=False, default=[]), 'name': FieldInfo(annotation=str, required=True), 'output': FieldInfo(annotation=Union[Annotated[TypeAliasType, SerializeAsAny], NoneType], required=False), 'start_timestamp': FieldInfo(annotation=Union[datetime, NoneType], required=False, default_factory=builtin_function_or_method)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+output: PydanticSerializable | None
+
+ +
+
+record_output(output: PydanticSerializable) None[source]
+

Record a Task’s output. Since a Context Manager can’t provide this in the __exit__ +method, output should be captured once it is generated.

+

This should be handled automatically within the execution of the task.

+
+
Parameters:
+

output – The output of the task that is being logged.

+
+
+
+ +
+ +
+
+class intelligence_layer.core.logger.JsonSerializer(root: RootModelRootType = PydanticUndefined)[source]
+

Bases: RootModel[TypeAliasType]

+
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'root': FieldInfo(annotation=TypeAliasType, required=True, metadata=[SerializeAsAny()])}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+root: PydanticSerializable
+
+ +
+ +
+
+class intelligence_layer.core.logger.LogEntry(*, message: str, value: PydanticSerializable, timestamp: datetime = None)[source]
+

Bases: BaseModel

+

An individual log entry, currently used to represent individual logs by the +InMemoryDebugLogger.

+
+
+message
+

A description of the value you are logging, such as the step in the task this +is related to.

+
+
Type:
+

str

+
+
+
+ +
+
+value
+

The relevant data you want to log. Can be anything that is serializable by +Pydantic, which gives the loggers flexibility in how they store and emit the logs.

+
+
Type:
+

PydanticSerializable

+
+
+
+ +
+
+timestamp
+

The time that the log was emitted.

+
+
Type:
+

datetime.datetime

+
+
+
+ +
+
+message: str
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'message': FieldInfo(annotation=str, required=True), 'timestamp': FieldInfo(annotation=datetime, required=False, default_factory=builtin_function_or_method), 'value': FieldInfo(annotation=TypeAliasType, required=True, metadata=[SerializeAsAny()])}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+timestamp: datetime
+
+ +
+
+value: PydanticSerializable
+
+ +
+ +
+
+class intelligence_layer.core.logger.LogLine(*, entry_type: str, entry: Any)[source]
+

Bases: BaseModel

+

Represents a complete log-line.

+
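A sketch of constructing a log-line and dispatching on its type (the payload values are illustrative):

>>> from datetime import datetime
>>> from uuid import uuid4
>>> payload = StartSpan(uuid=uuid4(), parent=uuid4(), name="chunking", start=datetime.now())
>>> line = LogLine(entry_type="StartSpan", entry=payload)
>>> if line.entry_type == "StartSpan":
...     start = line.entry  # after a JSON round-trip: StartSpan.model_validate(line.entry)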
+
+entry_type
+

The type of the entry. This is the class-name of one of the classes +representing a log-entry (e.g. “StartTask”).

+
+
Type:
+

str

+
+
+
+ +
+
+entry
+

The actual entry.

+
+
Type:
+

Any

+
+
+
+ +
+
+entry: Any
+
+ +
+
+entry_type: str
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'entry': FieldInfo(annotation=Any, required=True, metadata=[SerializeAsAny()]), 'entry_type': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+ +
+
+class intelligence_layer.core.logger.NoOpDebugLogger[source]
+

Bases: object

+

A no-op logger. Useful for cases, like testing, where a logger is needed for a task, but you +don’t have a need to collect or inspect the actual logs.

+

All calls to log won’t actually do anything.

+
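For example, in a unit test (a sketch):

>>> logger = NoOpDebugLogger()
>>> logger.log("ignored", {"any": "value"})  # does nothing
>>> with logger.task_span("my-task", input="question") as task_span:
...     task_span.record_output("answer")  # also a no-op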
+
+log(message: str, value: PydanticSerializable) None[source]
+

Record a log of relevant information as part of a step within a task.

+

By default, the Input and Output of each Task are logged automatically, but you can +log anything else that seems relevant to understanding the output of a given task.

+
+
Parameters:
+
    +
  • message – A description of the value you are logging, such as the step in the task this +is related to.

  • +
  • value – The relevant data you want to log. Can be anything that is serializable by +Pydantic, which gives the loggers flexibility in how they store and emit the logs.

  • +
+
+
+
+ +
+
+span(name: str) NoOpTaskSpan[source]
+

Generate a sub-logger from the current logging instance.

+
+
Parameters:
+

name – A descriptive name of what this child logger will contain logs about.

+
+
Returns:
+

Another NoOpDebugLogger

+
+
+
+ +
+
+task_span(task_name: str, input: PydanticSerializable) NoOpTaskSpan[source]
+

Generate a task-specific span from the current logging instance.

+
+
Parameters:
+
    +
  • task_name – The name of the task that is being logged

  • +
  • input – The input for the task that is being logged.

  • +
+
+
Returns:
+

A NoOpTaskSpan

+
+
+
+ +
+ +
+
+class intelligence_layer.core.logger.NoOpTaskSpan[source]
+

Bases: NoOpDebugLogger, AbstractContextManager[NoOpTaskSpan]

+
+
+record_output(output: PydanticSerializable) None[source]
+

Record a Task’s output. Since a Context Manager can’t provide this in the __exit__ +method, output should be captured once it is generated.

+

This should be handled automatically within the execution of the task.

+
+
Parameters:
+

output – The output of the task that is being logged.

+
+
+
+ +
+ +
+
+class intelligence_layer.core.logger.PlainEntry(*, message: str, value: Any, timestamp: datetime, parent: UUID)[source]
+

Bases: BaseModel

+

Represents a plain log-entry created through DebugLogger.log.

+
+
+message
+

the message-parameter of DebugLogger.log

+
+
Type:
+

str

+
+
+
+ +
+
+value
+

the value-parameter of DebugLogger.log

+
+
Type:
+

Any

+
+
+
+ +
+
+timestamp
+

the timestamp when DebugLogger.log was called.

+
+
Type:
+

datetime.datetime

+
+
+
+ +
+
+parent
+

The unique id of the parent element of the log. +This could refer to either a surrounding TaskSpan, Span or the top-level DebugLogger.

+
+
Type:
+

uuid.UUID

+
+
+
+ +
+
+message: str
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'message': FieldInfo(annotation=str, required=True), 'parent': FieldInfo(annotation=UUID, required=True), 'timestamp': FieldInfo(annotation=datetime, required=True), 'value': FieldInfo(annotation=Any, required=True, metadata=[SerializeAsAny()])}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+parent: UUID
+
+ +
+
+timestamp: datetime
+
+ +
+
+value: Any
+
+ +
+ +
+
+class intelligence_layer.core.logger.Span(*args, **kwargs)[source]
+

Bases: AbstractContextManager[Span], DebugLogger, Protocol

+

A protocol for instrumenting logs nested within a span of time. Groups logs by some logical +step.

+

The implementation should also be a Context Manager, to capture the span of duration of +execution.

+

Implementations of how logs are collected and stored may differ. Refer to the individual +documentation of each implementation to see how to use the resulting logger.

+
+ +
+
+class intelligence_layer.core.logger.StartSpan(*, uuid: UUID, parent: UUID, name: str, start: datetime)[source]
+

Bases: BaseModel

+

Represents the payload/entry of a log-line indicating that a Span was opened through DebugLogger.span.

+
+
+uuid
+

A unique id for the opened Span.

+
+
Type:
+

uuid.UUID

+
+
+
+ +
+
+parent
+

The unique id of the parent element of the opened Span. This could refer to either a surrounding TaskSpan, Span or the top-level DebugLogger.

+
+
Type:
+

uuid.UUID

+
+
+
+ +
+
+name
+

The name of the span.

+
+
Type:
+

str

+
+
+
+ +
+
+start
+

The timestamp when this Span was started.

+
+
Type:
+

datetime.datetime

+
+
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'name': FieldInfo(annotation=str, required=True), 'parent': FieldInfo(annotation=UUID, required=True), 'start': FieldInfo(annotation=datetime, required=True), 'uuid': FieldInfo(annotation=UUID, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+name: str
+
+ +
+
+parent: UUID
+
+ +
+
+start: datetime
+
+ +
+
+uuid: UUID
+
+ +
+ +
+
+class intelligence_layer.core.logger.StartTask(*, uuid: UUID, parent: UUID, name: str, start: datetime, input: Any)[source]
+

Bases: BaseModel

+

Represents the payload/entry of a log-line indicating that a TaskSpan was opened through DebugLogger.task_span.

+
+
+uuid
+

A unique id for the opened TaskSpan.

+
+
Type:
+

uuid.UUID

+
+
+
+ +
+
+parent
+

The unique id of the parent element of the opened TaskSpan. This could refer to either a surrounding TaskSpan, Span or the top-level DebugLogger.

+
+
Type:
+

uuid.UUID

+
+
+
+ +
+
+name
+

The name of the task.

+
+
Type:
+

str

+
+
+
+ +
+
+start
+

The timestamp when this Task was started (i.e. run was called).

+
+
Type:
+

datetime.datetime

+
+
+
+ +
+
+input
+

The Input (i.e. parameter for run) the Task was started with.

+
+
Type:
+

Any

+
+
+
+ +
+
+input: Any
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'input': FieldInfo(annotation=Any, required=True, metadata=[SerializeAsAny()]), 'name': FieldInfo(annotation=str, required=True), 'parent': FieldInfo(annotation=UUID, required=True), 'start': FieldInfo(annotation=datetime, required=True), 'uuid': FieldInfo(annotation=UUID, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+name: str
+
+ +
+
+parent: UUID
+
+ +
+
+start: datetime
+
+ +
+
+uuid: UUID
+
+ +
+ +
+
+class intelligence_layer.core.logger.TaskSpan(*args, **kwargs)[source]
+

Bases: AbstractContextManager[TaskSpan], DebugLogger, Protocol

+

A protocol for instrumenting a Task’s input, output, and nested logs.

+

Most likely, generating this task logger will capture the Task’s input, as well as the task +name.

+

The implementation should also be a Context Manager, to capture the span of duration of +task execution.

+

Implementations of how logs are collected and stored may differ. Refer to the individual +documentation of each implementation to see how to use the resulting logger.

+
+
+record_output(output: PydanticSerializable) None[source]
+

Record a Task’s output. Since a Context Manager can’t provide this in the __exit__ +method, output should be captured once it is generated.

+

This should be handled automatically within the execution of the task.

+
+
Parameters:
+

output – The output of the task that is being logged.

+
+
+
+ +
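Putting the pieces together, a typical flow looks like this (a sketch using the in-memory implementation):

>>> logger = InMemoryDebugLogger(name="run")
>>> with logger.task_span("my-task", input="question") as task_span:
...     task_span.log("intermediate", {"step": 1})
...     task_span.record_output("answer")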
+ +
+
+

intelligence_layer.core.prompt_template module

+
+
+class intelligence_layer.core.prompt_template.PromptItemCursor(item: int)[source]
+

Bases: object

+

Defines a position that points at a non-Text prompt item.

+
+
Parameters:
+

item – the index of the prompt item within the Prompt

+
+
+
+
+item: int
+
+ +
+ +
+
+class intelligence_layer.core.prompt_template.PromptRange(start: TextCursor | PromptItemCursor, end: TextCursor | PromptItemCursor)[source]
+

Bases: object

+

Defines a range within a Prompt.

+
+
+end: TextCursor | PromptItemCursor
+
+ +
+
+start: TextCursor | PromptItemCursor
+
+ +
+ +
+
+class intelligence_layer.core.prompt_template.PromptRangeContext(env: Environment, globals: Mapping[str, object] | None = None, disabled_tags: List[str] | None = None, copy_depth: int = 0, parent_context: Context | None = None, loop_iteration_carry: int = 1, local_namespace_size_carry: int = 0, template: BoundTemplate | None = None)[source]
+

Bases: Context

+

A liquid Context with some additional state used by the PromptRangeNode.

+
+
+add_placeholder_range(placeholder: Placeholder, name: str) None[source]
+
+ +
+
+autoescape
+
+ +
+
+counters: Dict[str, int]
+
+ +
+
+disabled_tags
+
+ +
+
+env
+
+ +
+
+globals
+
+ +
+
+local_namespace_size_carry
+
+ +
+
+locals: MutableMapping[str, object]
+
+ +
+
+loop_iteration_carry
+
+ +
+
+loops: List[ForLoop]
+
+ +
+
+parent_context
+
+ +
+
+placeholder_range_names() Mapping[Placeholder, str][source]
+
+ +
+
+scope
+
+ +
+
+tag_namespace: Dict[str, Any]
+
+ +
+
+template
+
+ +
+ +
+
+class intelligence_layer.core.prompt_template.PromptRangeNode(inner: BlockNode, name: str)[source]
+

Bases: Node

+

A liquid Node representing a promptrange.

+
+
+render_to_output(context: Context, buffer: TextIO) bool | None[source]
+

Render this node to the output buffer.

+
+ +
+ +
+
+class intelligence_layer.core.prompt_template.PromptRangeTag(env: Environment)[source]
+

Bases: Tag

+

Defines the liquid tag for the promptrange.

+
+
+end = 'endpromptrange'
+
+ +
+
+name = 'promptrange'
+
+ +
+
+parse(stream: TokenStream) Node[source]
+

Return a parse tree node by parsing tokens from the given stream.

+
+ +
+ +
+
+class intelligence_layer.core.prompt_template.PromptTemplate(template_str: str)[source]
+

Bases: object

+

Allows building a Prompt using the liquid template language.

+

To add non-text prompt items, first save them to the template with the template.placeholder() function. To embed the items in the template, pass the placeholder in the place(s) where you would like the items.

+

Example

+
>>> image = Image.from_file(Path("path-to-image"))
+>>> template = PromptTemplate(
+    '''{%- for name in names -%}
+    Hello {{name}}!
+    {% endfor -%}
+    {{ image }}
+    ''')
+>>> placeholder = template.placeholder(image)
+>>> names = ["World", "Rutger"]
+>>> prompt = template.to_prompt(names=names, image=placeholder)
+>>> request = CompletionRequest(prompt=prompt)
+
+
+
+
+embed_prompt(prompt: Prompt) str[source]
+

Embeds a prompt in a prompt template.

+

Adds whitespace between text items if there is no whitespace between them. +In case of non-text prompt items, this embeds them into the end result.

+

Example

+
>>> user_prompt = Prompt(
+        [
+            Tokens.from_token_ids([1, 2, 3]),
+            Text.from_text("cool"),
+            Image.from_file(Path("path-to-image")),
+        ]
+    )
+>>> template = PromptTemplate("Question: {{user_prompt}}\n Answer: ")
+>>> prompt = template.to_prompt(user_prompt=template.embed_prompt(user_prompt))
+
+
+
+
Parameters:
+

prompt – prompt to embed in the template

+
+
+
+ +
+
+placeholder(value: Image | Tokens) Placeholder[source]
+

Saves a non-text prompt item to the template and returns a placeholder.

+

The placeholder is used to embed the prompt item in the template.

+
+ +
+
+to_prompt(**kwargs: Any) Prompt[source]
+

Creates a Prompt from the template string and the given parameters.

+

Provided parameters are passed to liquid.Template.render.

+
+ +
+
+to_prompt_with_metadata(**kwargs: Any) PromptWithMetadata[source]
+

Creates a Prompt along with metadata from the template string and the given parameters.

+

Currently the only metadata returned is information about ranges that are marked in the template. +Provided parameters are passed to liquid.Template.render.

+
+ +
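A minimal sketch of reading marked ranges back (the range name r1 is illustrative):

>>> template = PromptTemplate("{% promptrange r1 %}Hello{% endpromptrange %} world")
>>> with_metadata = template.to_prompt_with_metadata()
>>> ranges = with_metadata.ranges["r1"]  # Sequence[PromptRange] covering the marked span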
+ +
+
+class intelligence_layer.core.prompt_template.PromptWithMetadata(prompt: Prompt, ranges: Mapping[str, Sequence[PromptRange]])[source]
+

Bases: object

+

The Prompt along with some metadata generated when a PromptTemplate is turned into a Prompt.

+
+
Parameters:
+
    +
  • prompt – The actual Prompt.

  • +
  • ranges – A mapping of range name to a Sequence of corresponding `PromptRange`s.

  • +
+
+
+
+
+prompt: Prompt
+
+ +
+
+ranges: Mapping[str, Sequence[PromptRange]]
+
+ +
+ +
+
+class intelligence_layer.core.prompt_template.TextCursor(item: int, position: int)[source]
+

Bases: object

+

Defines a position within a Text prompt item.

+
+
Parameters:
+
    +
  • item – the index of the prompt item within the Prompt

  • +
  • position – the character position in the text of the item.

  • +
+
+
+

Example

>>> prompt = Prompt.from_text("This is a text")
>>> cursor = TextCursor(item=0, position=5)
>>> # This denotes the "i" in "is" in the text-item of the Prompt above

+
+
+item: int
+
+ +
+
+position: int
+
+ +
+ +
+
+

intelligence_layer.core.task module

+
+
+class intelligence_layer.core.task.Chunk
+

Segment of a larger text.

+

This type signals that the string is smaller than the context size of the model where it is used.

+

LLMs can’t process documents larger than their context size. +To handle this, documents have to be split up into smaller segments that fit within their context size. +These smaller segments are referred to as chunks.

+

alias of str

+
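Because Chunk is an alias of str, producing one is just a cast (a sketch; the text is illustrative):

>>> from intelligence_layer.core.task import Chunk
>>> chunk = Chunk("A short segment that fits within the model's context size.")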
+ +
+
+class intelligence_layer.core.task.Input
+

Interface to be passed to the task with all data needed to run the process. +Ideally, these are specified in terms related to the use-case, rather than lower-level +configuration options.

+

alias of TypeVar(‘Input’, bound=PydanticSerializable)

+
+ +
+
+class intelligence_layer.core.task.Output
+

Interface of the output returned by the task.

+

alias of TypeVar(‘Output’, bound=PydanticSerializable)

+
+ +
+
+class intelligence_layer.core.task.Task[source]
+

Bases: ABC, Generic[Input, Output]

+

Base task interface. This may consist of several sub-tasks to accomplish the given task.

+
+
Generics:
+
Input: Interface to be passed to the task with all data needed to run the process.

Ideally, these are specified in terms related to the use-case, rather than lower-level +configuration options.

+
+
+

Output: Interface of the output returned by the task.

+
+
+
+
+abstract run(input: Input, logger: DebugLogger) Output[source]
+

Executes the process for this use-case.

+
+ +
+
+run_concurrently(inputs: Iterable[Input], debug_logger: DebugLogger, concurrency_limit: int = 20) Sequence[Output][source]
+

Executes multiple processes of this task concurrently.

+

Each provided input is potentially executed concurrently with the others. There is a global limit on the number of concurrently executed tasks that is shared by all tasks of all types.

+
+
Parameters:
+
    +
  • inputs – The inputs that are potentially processed concurrently.

  • +
  • debug_logger – The logger passed on the run method when executing a task.

  • +
• concurrency_limit – An optional additional limit for the number of concurrently executed tasks for this method call. This can be used to prevent queue-full or similar errors of downstream APIs when the global concurrency limit is too high for a certain task.

  • +
+
+
Returns:
+

+
The `Output`s generated by calling `run` for each given `Input`. The order of `Output`s corresponds to the order of the `Input`s.

+
+
+

+
+
+
+ +
+ +
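As a sketch (the EchoTask below is hypothetical and not part of the library):

>>> from intelligence_layer.core.logger import NoOpDebugLogger
>>> from intelligence_layer.core.task import Task
>>> class EchoTask(Task[str, str]):
...     def run(self, input: str, logger) -> str:
...         return input.upper()
>>> outputs = EchoTask().run_concurrently(["a", "b"], NoOpDebugLogger(), concurrency_limit=2)
>>> list(outputs)  # order corresponds to the order of the inputs
['A', 'B']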
+
+class intelligence_layer.core.task.Token(*, token: str, token_id: int)[source]
+

Bases: BaseModel

+

A token class containing its id and the raw token.

+

This is used instead of the Aleph Alpha client Token class since this one is serializable, +while the one from the client is not.

+
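For example (values are illustrative):

>>> token = Token(token="Hello", token_id=42)
>>> token.model_dump()
{'token': 'Hello', 'token_id': 42}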
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'token': FieldInfo(annotation=str, required=True), 'token_id': FieldInfo(annotation=int, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+token: str
+
+ +
+
+token_id: int
+
+ +
+ +
+
+intelligence_layer.core.task.batched(iterable: Iterable[T], n: int) Iterable[Iterable[T]][source]
+
+ +
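The expected behavior, sketched (the exact type of the inner iterables may differ):

>>> from intelligence_layer.core.task import batched
>>> [list(batch) for batch in batched(range(5), 2)]
[[0, 1], [2, 3], [4]]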
+
+

intelligence_layer.core.text_highlight module

+
+
+class intelligence_layer.core.text_highlight.ScoredTextHighlight(*, text: str, score: float)[source]
+

Bases: BaseModel

+

A substring of the input prompt scored for relevance with regard to the output.

+
+
+text
+

The highlighted part of the prompt.

+
+
Type:
+

str

+
+
+
+ +
+
+score
+

The z-score of the highlight. Depicts relevance of this highlight in relation to all other highlights. Can be positive (support) or negative (contradiction).

+
+
Type:
+

float

+
+
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'score': FieldInfo(annotation=float, required=True), 'text': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+score: float
+
+ +
+
+text: str
+
+ +
+ +
+
+class intelligence_layer.core.text_highlight.TextHighlight(client: Client, granularity: PromptGranularity = PromptGranularity.Sentence)[source]
+

Bases: Task[TextHighlightInput, TextHighlightOutput]

+

Generates text highlights given a prompt and completion.

+
+

For a given prompt and target (completion), extracts the parts of the prompt responsible for generation. +A range can be provided in the input ‘PromptWithMetadata’ via use of the liquid language (see the example). +In this case, the highlights will only refer to text within this range.

+
+
Parameters:

client – Aleph Alpha client instance for running model related API calls.

+
+
+

Example

>>> client = Client(os.getenv("AA_TOKEN"))
>>> text_highlight = TextHighlight(client=client)
>>> prompt_template_str = "{% promptrange r1 %}Question: What is 2 + 2?{% endpromptrange %}\nAnswer:"
>>> template = PromptTemplate(prompt_template_str)
+>>> prompt_with_metadata = template.to_prompt_with_metadata()
+>>> completion = " 4."
+>>> model = "luminous-base"
+>>> input = TextHighlightInput(
+>>>     prompt_with_metadata=prompt_with_metadata, target=completion, model=model
+>>> )
+>>> output = text_highlight.run(input, InMemoryDebugLogger(name="Highlight"))
+
+
+
+
+
+
+run(input: TextHighlightInput, logger: DebugLogger) TextHighlightOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
+
+class intelligence_layer.core.text_highlight.TextHighlightInput(*, prompt_with_metadata: PromptWithMetadata, target: str, model: str, focus_ranges: frozenset[str] = frozenset({}))[source]
+

Bases: BaseModel

+

The input for a text highlighting task.

+
+
+prompt_with_metadata
+

From the PromptTemplate. Includes both the actual 'Prompt' as well as text range information. Supports liquid-template-language-style {% promptrange range_name %}/{% endpromptrange %} markers for ranges.

+
+
Type:
+

intelligence_layer.core.prompt_template.PromptWithMetadata

+
+
+
+ +
+
+target
+

The target that should be explained. Expected to follow the prompt.

+
+
Type:
+

str

+
+
+
+ +
+
+model
+

A valid Aleph Alpha model name.

+
+
Type:
+

str

+
+
+
+ +
+
+focus_ranges
+

The ranges contained in prompt_with_metadata that the returned highlights stem from. That means that each returned highlight overlaps by at least one character with one of the ranges listed here. If this set is empty, highlights for the entire prompt are returned.

+
+
Type:
+

frozenset[str]

+
+
+
+ +
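Building on the TextHighlight example above, highlights can be restricted to a named range (a sketch):

>>> input = TextHighlightInput(
...     prompt_with_metadata=prompt_with_metadata,
...     target=" 4.",
...     model="luminous-base",
...     focus_ranges=frozenset({"r1"}),
... )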
+
+focus_ranges: frozenset[str]
+
+ +
+
+model: str
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'focus_ranges': FieldInfo(annotation=frozenset[str], required=False, default=frozenset()), 'model': FieldInfo(annotation=str, required=True), 'prompt_with_metadata': FieldInfo(annotation=PromptWithMetadata, required=True), 'target': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+prompt_with_metadata: PromptWithMetadata
+
+ +
+
+target: str
+
+ +
+ +
+
+class intelligence_layer.core.text_highlight.TextHighlightOutput(*, highlights: Sequence[ScoredTextHighlight])[source]
+

Bases: BaseModel

+

The output of a text highlighting task.

+
+
+highlights
+

A sequence of ‘ScoredTextHighlight’s.

+
+
Type:
+

Sequence[intelligence_layer.core.text_highlight.ScoredTextHighlight]

+
+
+
+ +
+
+highlights: Sequence[ScoredTextHighlight]
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'highlights': FieldInfo(annotation=Sequence[ScoredTextHighlight], required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+ +
+
+

Module contents

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/intelligence_layer.html b/intelligence_layer.html new file mode 100644 index 000000000..4a8a38cb5 --- /dev/null +++ b/intelligence_layer.html @@ -0,0 +1,595 @@ + + + + + + + + intelligence_layer package — Intelligence Layer documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

intelligence_layer package

+
+

Subpackages

+
+ +
+
+
+

Module contents

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/intelligence_layer.use_cases.classify.html b/intelligence_layer.use_cases.classify.html new file mode 100644 index 000000000..beae53a10 --- /dev/null +++ b/intelligence_layer.use_cases.classify.html @@ -0,0 +1,607 @@ + + + + + + + + intelligence_layer.use_cases.classify package — Intelligence Layer documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

intelligence_layer.use_cases.classify package

+
+

Submodules

+
+
+

intelligence_layer.use_cases.classify.classify module

+
+
+class intelligence_layer.use_cases.classify.classify.AggregatedClassifyEvaluation(*, percentage_correct: float, evaluations: Sequence[ClassifyEvaluation])[source]
+

Bases: BaseModel

+

The aggregated evaluation of a single label classify implementation against a dataset.

+
+
+percentage_correct
+

Percentage of answers that were considered to be correct

+
+
Type:
+

float

+
+
+
+ +
+
+evaluation
+

The actual evaluations

+
+ +
+
+evaluations: Sequence[ClassifyEvaluation]
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'evaluations': FieldInfo(annotation=Sequence[ClassifyEvaluation], required=True), 'percentage_correct': FieldInfo(annotation=float, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+percentage_correct: float
+
+ +
+ +
+
+class intelligence_layer.use_cases.classify.classify.Classify[source]
+

Bases: Task[ClassifyInput, ClassifyOutput]

+

Placeholder class for any classifier implementation.

+
+
+abstract run(input: Input, logger: DebugLogger) Output
+

Executes the process for this use-case.

+
+ +
+ +
+
+class intelligence_layer.use_cases.classify.classify.ClassifyEvaluation(*, correct: bool, output: ClassifyOutput)[source]
+

Bases: BaseModel

+

The evaluation of a single label classification run.

+
+
+correct
+

Whether the highest-scoring class from the output was in the set of "correct classes".

+
+
Type:
+

bool

+
+
+
+ +
+
+output
+

The actual output from the task run

+
+
Type:
+

intelligence_layer.use_cases.classify.classify.ClassifyOutput

+
+
+
+ +
+
+correct: bool
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'correct': FieldInfo(annotation=bool, required=True), 'output': FieldInfo(annotation=ClassifyOutput, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+output: ClassifyOutput
+
+ +
+ +
+
+class intelligence_layer.use_cases.classify.classify.ClassifyEvaluator(task: Classify)[source]
+

Bases: Evaluator[ClassifyInput, Sequence[str], ClassifyEvaluation, AggregatedClassifyEvaluation]

+
+
+aggregate(evaluations: Sequence[ClassifyEvaluation]) AggregatedClassifyEvaluation[source]
+

Evaluator-specific method for aggregating individual Evaluations into a report-like Aggregated Evaluation.

+
+ +
+
+evaluate(input: ClassifyInput, logger: DebugLogger, expected_output: Sequence[str]) ClassifyEvaluation[source]
+

Executes the evaluation for this use-case.

+
+ +
+ +
+
+class intelligence_layer.use_cases.classify.classify.ClassifyInput(*, chunk: Chunk, labels: frozenset[str])[source]
+

Bases: BaseModel

+

Input for a classification task.

+
+
+chunk
+

text to be classified.

+
+
Type:
+

intelligence_layer.core.task.Chunk

+
+
+
+ +
+
+labels
+

Possible labels the model will choose a label from.

+
+
Type:
+

frozenset[str]

+
+
+
+ +
+
+chunk: Chunk
+
+ +
+
+labels: frozenset[str]
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'chunk': FieldInfo(annotation=NewType, required=True), 'labels': FieldInfo(annotation=frozenset[str], required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+ +
+
+class intelligence_layer.use_cases.classify.classify.ClassifyOutput(*, scores: Mapping[str, Probability])[source]
+

Bases: BaseModel

+

Output for a single label classification task.

+
+
+scores
+

Mapping of the provided label (key) to corresponding score (value). +The score represents how sure the model is that this is the correct label. +This will be a value between 0 and 1. +The sum of all probabilities will be 1.

+
+
Type:
+

Mapping[str, intelligence_layer.core.task.Probability]

+
+
+
+ +
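For example, picking the most likely label (a sketch; probabilities shown as plain floats):

>>> output = ClassifyOutput(scores={"positive": 0.9, "negative": 0.1})
>>> max(output.scores, key=output.scores.get)
'positive'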
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'scores': FieldInfo(annotation=Mapping[str, NewType], required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+scores: Mapping[str, Probability]
+
+ +
+ +
+
+

intelligence_layer.use_cases.classify.embedding_based_classify module

+
+
+class intelligence_layer.use_cases.classify.embedding_based_classify.EmbeddingBasedClassify(labels_with_examples: Sequence[LabelWithExamples], client: Client, scoring: EmbeddingBasedClassifyScoring = EmbeddingBasedClassifyScoring.MEAN_TOP_5)[source]
+

Bases: Classify

+

Task that classifies a given input text based on examples.

+

The input contains a complete set of all possible labels. The output will return a score for each possible label. Scores will be between 0 and 1 but do not have to add up to one. On initialization, provide a list of examples for each label.

+

This methodology works best with a larger number of examples per label and with labels +that consist of easily definable semantic clusters.

+
+
Parameters:
+
    +
  • labels_with_examples – Examples to be used for classification.

  • +
  • client – Aleph Alpha client instance for running model related API calls.

  • +
  • scoring – Configure how to calculate the final score.

  • +
+
+
+
+
+METADATA_LABEL_NAME
+

The metadata field holding the label name for the InMemoryRetriever instance.

+
+ +

Example

+
>>> labels_with_examples = [
+>>>     LabelWithExamples(
+>>>         name="positive",
+>>>         examples=[
+>>>             "I really like this.",
+>>>         ],
+>>>     ),
+>>>     LabelWithExamples(
+>>>         name="negative",
+>>>         examples=[
+>>>             "I really dislike this.",
+>>>         ],
+>>>     ),
+>>> ]
+>>> client = Client(token="AA_TOKEN")
+>>> task = EmbeddingBasedClassify(labels_with_examples, client)
+>>> input = ClassifyInput(
+>>>     chunk="This is a happy text.",
+>>>     labels={"positive", "negative"}
+>>> )
+>>> logger = InMemoryDebugLogger(name="Classify")
+>>> output = task.run(input, logger)
+>>> print(output.scores["positive"])
+0.7
+
+
+
+
+METADATA_LABEL_NAME = 'label'
+
+ +
+
+run(input: ClassifyInput, logger: DebugLogger) ClassifyOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
+
+class intelligence_layer.use_cases.classify.embedding_based_classify.EmbeddingBasedClassifyScoring(value)[source]
+

Bases: Enum

+

Specify the type of scoring to use.

+
+
+MAX
+

Takes the score of the top match, i.e., the max.

+
+ +
+
+MEAN_TOP_5
+

Takes the mean of the top 5 matches.

+
+ +
+
+MAX = 1
+
+ +
+
+MEAN_TOP_5 = 5
+
+ +
+ +
+
+class intelligence_layer.use_cases.classify.embedding_based_classify.LabelWithExamples(*, name: str, examples: Sequence[str])[source]
+

Bases: BaseModel

+

Defines a label and the list of examples making it up.

+
+
+name
+

Name of the label.

+
+
Type:
+

str

+
+
+
+ +
+
+examples
+

The texts defining the label. They should be similar in structure and semantics to the texts to be classified at inference time.

+
+
Type:
+

Sequence[str]

+
+
+
+ +
+
+examples: Sequence[str]
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'examples': FieldInfo(annotation=Sequence[str], required=True), 'name': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+name: str
+
+ +
+ +
+
+

intelligence_layer.use_cases.classify.single_label_classify module

+
+
+class intelligence_layer.use_cases.classify.single_label_classify.SingleLabelClassify(client: Client)[source]
+

Bases: Classify

+

Task that classifies a given input text with one of the given classes.

+

The input contains a complete set of all possible labels. The output will return a score for +each possible label. All scores will add up to 1 and are relative to each other. The highest +score is given to the most likely class.

+

This methodology works best for classes that are easily understood, and don’t require an +explanation or examples.

+
+
Parameters:
+

client – Aleph Alpha client instance for running model related API calls.

+
+
+
+
+PROMPT_TEMPLATE
+

The prompt template used for answering the question. +‘text’ and ‘labels’ will be inserted here.

+
+ +
+
+MODEL
+

A valid Aleph Alpha model name.

+
+
Type:
+

str

+
+
+
+ +

Example

+
>>> client = Client(token="AA_TOKEN")
+>>> task = SingleLabelClassify(client)
+>>> input = ClassifyInput(
+        chunk="This is a happy text.",
+        labels={"positive", "negative"}
+    )
+>>> logger = InMemoryDebugLogger(name="Classify")
+>>> output = task.run(input, logger)
+>>> print(output.scores["positive"])
+0.9
+
+
+
+
+MODEL: str = 'luminous-base-control'
+
+ +
+
+PROMPT_TEMPLATE: str = '### Instruction:\nIdentify a class that describes the text adequately.\nReply with only the class label.\n\n### Input:\n{{text}}\n\n### Response:'
+
+ +
+
+run(input: ClassifyInput, logger: DebugLogger) ClassifyOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
+
+class intelligence_layer.use_cases.classify.single_label_classify.TreeNode(token: Token | None = None, prob: Probability | None = None)[source]
+

Bases: object

+
+
+find_child(token: Token) TreeNode | None[source]
+
+ +
+
+insert_path(path: Sequence[TokenWithProb]) None[source]
+
+ +
+
+insert_without_calculation(path: Sequence[TokenWithProb]) None[source]
+

Inserts a path into the tree without changing the original probability.

+

Temporarily here until we change this data structure to be more versatile.

+
+ +
+
+normalize_probs() None[source]
+
+ +
+
+path(tokens: Iterable[Token]) Iterable[TokenWithProb][source]
+
+ +
+ +
+
+intelligence_layer.use_cases.classify.single_label_classify.to_aa_tokens_prompt(tokens: Sequence[Token]) Prompt[source]
+
+ +
+
+

Module contents

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/intelligence_layer.use_cases.html b/intelligence_layer.use_cases.html new file mode 100644 index 000000000..6463d007c --- /dev/null +++ b/intelligence_layer.use_cases.html @@ -0,0 +1,396 @@ + + + + + + + + intelligence_layer.use_cases package — Intelligence Layer documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

intelligence_layer.use_cases package

+
+

Subpackages

+
+ +
+
+
+

Module contents

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/intelligence_layer.use_cases.qa.html b/intelligence_layer.use_cases.qa.html new file mode 100644 index 000000000..6e4ea52c6 --- /dev/null +++ b/intelligence_layer.use_cases.qa.html @@ -0,0 +1,711 @@ + + + + + + + + intelligence_layer.use_cases.qa package — Intelligence Layer documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

intelligence_layer.use_cases.qa package

+
+

Submodules

+
+
+

intelligence_layer.use_cases.qa.long_context_qa module

+
+
+class intelligence_layer.use_cases.qa.long_context_qa.LongContextQa(client: Client, max_tokens_in_chunk: int = 512, k: int = 4, model: str = 'luminous-supreme-control')[source]
+

Bases: Task[LongContextQaInput, MultipleChunkQaOutput]

+

Answer a question on the basis of a (lengthy) document.

+

Best for answering a question on the basis of a long document, where the length +of text exceeds the context length of a model (e.g. 2048 tokens for the luminous models).

+
+

Note

+
    +
• Creates an instance of InMemoryRetriever on the fly.

  • +
  • model provided should be a control-type model.

  • +
+
+
+
Parameters:
+
    +
  • client – Aleph Alpha client instance for running model related API calls.

  • +
  • max_tokens_in_chunk – The input text will be split into chunks to fit the context window. +Used to tweak the length of the chunks.

  • +
  • k – The number of top relevant chunks to retrieve.

  • +
  • model – A valid Aleph Alpha model name.

  • +
+
+
+

Example

+
>>> client = Client(os.getenv("AA_TOKEN"))
+>>> task = LongContextQa(client)
+>>> input = LongContextQaInput(text="Lengthy text goes here...", question="Where does the text go?")
+>>> logger = InMemoryDebugLogger(name="Long Context QA")
+>>> output = task.run(input, logger)
+
+
+
+
+run(input: LongContextQaInput, logger: DebugLogger) MultipleChunkQaOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
+
+class intelligence_layer.use_cases.qa.long_context_qa.LongContextQaInput(*, text: str, question: str)[source]
+

Bases: BaseModel

+

The input for a LongContextQa task.

+
+
+text
+

Text of arbitrary length on the basis of which the question is to be answered.

+
+
Type:
+

str

+
+
+
+ +
+
+question
+

The question for the text.

+
+
Type:
+

str

+
+
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'question': FieldInfo(annotation=str, required=True), 'text': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+question: str
+
+ +
+
+text: str
+
+ +
+ +
+
+

intelligence_layer.use_cases.qa.multiple_chunk_qa module

+
+
+class intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQa(client: Client, model: str = 'luminous-supreme-control')[source]
+

Bases: Task[MultipleChunkQaInput, MultipleChunkQaOutput]

+

Answer a question on the basis of a list of text chunks.

+

Uses Aleph Alpha models to generate a natural language answer based on multiple text chunks. +Best for longer texts that are already split into smaller units (chunks). +Relies on SingleChunkQa to generate answers for each chunk and then merges the answers into a single final answer. +Includes logic to return ‘answer = None’ if the language model determines that the question cannot be +reliably answered on the basis of the chunks.

+
+

Note

+

model provided should be a control-type model.

+
+
+
Parameters:
+
    +
  • client – Aleph Alpha client instance for running model related API calls.

  • +
  • model – A valid Aleph Alpha model name.

  • +
+
+
+
+
+MERGE_ANSWERS_INSTRUCTION
+

The instruction template used for combining multiple answers into one.

+
+ +

Example

+
>>> client = Client(token="AA_TOKEN")
+>>> task = MultipleChunkQa(client)
+>>> input = MultipleChunkQaInput(
+>>>     chunks=["Tina does not like pizza.", "Mike is a big fan of pizza."],
+>>>     question="Who likes pizza?"
+>>> )
+>>> logger = InMemoryDebugLogger(name="Multiple Chunk QA")
+>>> output = task.run(input, logger)
+>>> print(output.answer)
+Mike likes pizza.
+
+
+
+
+MERGE_ANSWERS_INSTRUCTION = "You will be given a number of Answers to a Question. Based on them, generate a single final answer.\nCondense multiple answers into a single answer. Rely only on the provided answers. Don't use the world's knowledge. The answer should combine the individual answers. If the answers contradict each other, e.g., one saying that the colour is green and the other saying that the colour is black, say that there are contradicting answers saying the colour is green or the colour is black."
+
+ +
+
+run(input: MultipleChunkQaInput, logger: DebugLogger) MultipleChunkQaOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
+
+class intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaInput(*, chunks: Sequence[Chunk], question: str)[source]
+

Bases: BaseModel

+

The input for a MultipleChunkQa task.

+
+
+chunks
+

The list of chunks that will be used to answer the question. Can be an arbitrarily long list of chunks.

+
+
Type:
+

Sequence[intelligence_layer.core.task.Chunk]

+
+
+
+ +
+
+question
+

The question that will be answered based on the chunks.

+
+
Type:
+

str

+
+
+
+ +
+
+chunks: Sequence[Chunk]
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'chunks': FieldInfo(annotation=Sequence[NewType], required=True), 'question': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+question: str
+
+ +
+ +
+
+class intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaOutput(*, answer: str | None, subanswers: Sequence[Subanswer])[source]
+

Bases: BaseModel

+

The output of a MultipleChunkQa task.

+
+
+answer
+

The answer generated by the task. Can be a string or None (if no answer was found).

+
+
Type:
+

str | None

+
+
+
+ +
+
+subanswers
+

All the subanswers used to generate the answer.

+
+
Type:
+

Sequence[intelligence_layer.use_cases.qa.multiple_chunk_qa.Subanswer]

+
+
+
+ +
+
+answer: str | None
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'answer': FieldInfo(annotation=Union[str, NoneType], required=True), 'subanswers': FieldInfo(annotation=Sequence[Subanswer], required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+subanswers: Sequence[Subanswer]
+
+ +
+ +
+
+class intelligence_layer.use_cases.qa.multiple_chunk_qa.Subanswer(*, answer: str, chunk: Chunk, highlights: Sequence[str])[source]
+

Bases: BaseModel

+

Individual answer based on just one of the multiple chunks.

+
+
+answer
+

The answer generated by the task, based on this chunk.

+
+
Type:
+

str

+
+
+
+ +
+
+chunk
+

Piece of the original text that answer is based on.

+
+
Type:
+

intelligence_layer.core.task.Chunk

+
+
+
+ +
+
+highlights
+

The specific sentences that explain the answer the most. +These are generated by the TextHighlight Task.

+
+
Type:
+

Sequence[str]

+
+
+
+ +
+
+answer: str
+
+ +
+
+chunk: Chunk
+
+ +
+
+highlights: Sequence[str]
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'answer': FieldInfo(annotation=str, required=True), 'chunk': FieldInfo(annotation=NewType, required=True), 'highlights': FieldInfo(annotation=Sequence[str], required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+ +
+
+

intelligence_layer.use_cases.qa.retriever_based_qa module

+
+
+class intelligence_layer.use_cases.qa.retriever_based_qa.RetrieverBasedQa(client: Client, retriever: BaseRetriever, model: str = 'luminous-supreme-control')[source]
+

Bases: Task[RetrieverBasedQaInput, MultipleChunkQaOutput]

+

Answer a question based on documents found by a retriever.

+

RetrieverBasedQa is a task that answers a question based on a set of documents. It relies on some retriever of type BaseRetriever that has the ability to access texts.

+
+

Note

+

model provided should be a control-type model.

+
+
+
Parameters:
+
    +
  • client – Aleph Alpha client instance for running model related API calls.

  • +
  • retriever – Used to access and return a set of texts.

  • +
  • model – A valid Aleph Alpha model name.

  • +
+
+
+

Example

+
>>> token = os.getenv("AA_TOKEN")
+>>> client = Client(token)
+>>> document_index = DocumentIndex(token)
+>>> retriever = DocumentIndexRetriever(document_index, "my_namespace", "ancient_facts_collection", 3)
+>>> task = RetrieverBasedQa(client, retriever)
+>>> input_data = RetrieverBasedQaInput(question="When was Rome founded?")
+>>> logger = InMemoryDebugLogger(name="Retriever Based QA")
+>>> output = task.run(input_data, logger)
+>>> print(output.answer)
+Rome was founded in 753 BC.
+
+
+
+
+run(input: RetrieverBasedQaInput, logger: DebugLogger) MultipleChunkQaOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
+
+class intelligence_layer.use_cases.qa.retriever_based_qa.RetrieverBasedQaInput(*, question: str)[source]
+

Bases: BaseModel

+

The input for a RetrieverBasedQa task.

+
+
+question
+

The question to be answered based on the documents accessed +by the retriever.

+
+
Type:
+

str

+
+
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'question': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model, +mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+question: str
+
+ +
+ +
+
+

intelligence_layer.use_cases.qa.single_chunk_qa module

+
+
+class intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQa(client: Client, model: str = 'luminous-supreme-control')[source]
+

Bases: Task[SingleChunkQaInput, SingleChunkQaOutput]

+

Answer a question on the basis of one chunk.

+

Uses Aleph Alpha models to generate a natural language answer for a text chunk given a question. +Will answer None if the language model determines that the question cannot be answered on the +basis of the text.

+
+

Note

+

model provided should be a control-type model.

+
+
+
Parameters:
+
    +
  • client – Aleph Alpha client instance for running model related API calls.

  • +
  • model – A valid Aleph Alpha model name.

  • +
+
+
+
+
+PROMPT_TEMPLATE_STR
+

The prompt template used for answering the question. +Includes liquid logic interpreted by ‘PromptTemplate’ specifically for generating +explainability-based highlights using TextHighlight.

+
+ +
+
+NO_ANSWER_STR
+

The string to be generated by the model in case no answer can be found.

+
+ +

Example

+
>>> client = Client(os.getenv("AA_TOKEN"))
+>>> task = SingleChunkQa(client)
+>>> input = SingleChunkQaInput(
+>>>     chunk="Tina does not like pizza. However, Mike does.",
+>>>     question="Who likes pizza?"
+>>> )
+>>> logger = InMemoryDebugLogger(name="Single Chunk QA")
+>>> output = task.run(input, logger)
+>>> print(output.answer)
+Mike likes pizza.
+
+
+
+
+NO_ANSWER_STR = 'NO_ANSWER_IN_TEXT'
+
+ +
+
+PROMPT_TEMPLATE_STR = '### Instruction:\n{{question}}\nIf there\'s no answer, say "{{no_answer_text}}".\n\n### Input:\n{% promptrange text %}{{text}}{% endpromptrange %}\n\n### Response:'
+
+ +
+
+run(input: SingleChunkQaInput, logger: DebugLogger) SingleChunkQaOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
+
+class intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaInput(*, chunk: Chunk, question: str)[source]
+

Bases: BaseModel

+

The input for a SingleChunkQa task.

+
+
+chunk
+

The (short) text to be asked about. Usually measures one or a few paragraphs. It cannot be longer than the context length of the model used, minus the size of the system prompt.

+
+
Type:
+

intelligence_layer.core.task.Chunk

+
+
+
+ +
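The context-length constraint on chunk can be sanity-checked before calling the task. A rough pre-flight heuristic; probably_fits is a hypothetical helper, the ~4-characters-per-token ratio is a crude average, and the 2048-token window is an assumption, so an exact check would need the model's tokenizer:

def probably_fits(
    chunk: str,
    context_tokens: int = 2048,
    chars_per_token: int = 4,
    reserve_tokens: int = 256,
) -> bool:
    # Reserve headroom for the instruction template and the completion itself.
    estimated_tokens = len(chunk) / chars_per_token
    return estimated_tokens + reserve_tokens <= context_tokens

If this returns False, splitting the text and using MultipleChunkQa or LongContextQa is the safer route.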
+
+question
+

The question to be asked about the chunk.

+
+
Type:
+

str

+
+
+
+ +
+
+chunk: Chunk
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model; should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'chunk': FieldInfo(annotation=NewType, required=True), 'question': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model: a mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+question: str
+
+ +
+ +
+
+class intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaOutput(*, answer: str | None, highlights: Sequence[str])[source]
+

Bases: BaseModel

+

The output of a SingleChunkQa task.

+
+
+answer
+

The answer generated by the task. Can be a string or None (if no answer was found).

+
+
Type:
+

str | None

+
+
+
+ +
+
+highlights
+

Highlights indicating which parts of the chunk contributed to the answer. Each highlight is a quote from the text.

+
+
Type:
+

Sequence[str]

+
+
+
+ +
+
+answer: str | None
+
+ +
+
+highlights: Sequence[str]
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model; should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'answer': FieldInfo(annotation=Union[str, NoneType], required=True), 'highlights': FieldInfo(annotation=Sequence[str], required=True)}
+

Metadata about the fields defined on the model: a mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+ +
+
+

Module contents

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/intelligence_layer.use_cases.search.html b/intelligence_layer.use_cases.search.html new file mode 100644 index 000000000..a5fbfa7d6 --- /dev/null +++ b/intelligence_layer.use_cases.search.html @@ -0,0 +1,334 @@ + + + + + + + + intelligence_layer.use_cases.search package — Intelligence Layer documentation + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

intelligence_layer.use_cases.search package

+
+

Submodules

+
+ +
+

intelligence_layer.use_cases.search.search module

+
+
+class intelligence_layer.use_cases.search.search.Search(retriever: BaseRetriever)[source]
+

Bases: Task[SearchInput, SearchOutput]

+

Performs a search to find documents.

+

Given a query, this task will utilize a retriever to fetch relevant text search results. Each result consists of a string representation of the content and an associated score indicating its relevance to the provided query.

+
+
Parameters:

  • retriever – Implements the logic to retrieve texts matching the query.

+
+
+

Example

+
>>> document_index = DocumentIndex(token)
+>>> retriever = DocumentIndexRetriever(document_index, "my_namespace", "country_facts_collection", 3)
+>>> task = Search(retriever)
+>>> input = SearchInput(
+>>>     query="When did East and West Germany reunite?"
+>>> )
+>>> logger = InMemoryLogger(name="Search")
+>>> output = task.run(input, logger)
+>>> print(output.results[0].text[-5:])
+1990.
+
+
+
+
+run(input: SearchInput, logger: DebugLogger) → SearchOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
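Because Search depends only on the retriever interface, it can be exercised without any search backend, for instance with the hypothetical KeywordRetriever sketched in the retriever_based_qa section above. Note the hedge on result fields: the doctest above reads results[0].text directly, while SearchOutput.results is typed as SearchResult objects nesting a Document under .document; use whichever your installed version exposes.

from intelligence_layer.core.logger import NoOpDebugLogger
from intelligence_layer.use_cases.search.search import Search, SearchInput

retriever = KeywordRetriever(
    [
        "Berlin is the capital of Germany.",
        "East and West Germany reunited in 1990.",
    ]
)
task = Search(retriever)
output = task.run(SearchInput(query="capital of Germany"), NoOpDebugLogger())
for result in output.results:
    print(result.score, result.document.text)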
+
+class intelligence_layer.use_cases.search.search.SearchInput(*, query: str)[source]
+

Bases: BaseModel

+

The input for a Search task.

+
+
+query
+

The text to be searched with.

+
+
Type:
+

str

+
+
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model; should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'query': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model: a mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+query: str
+
+ +
+ +
+
+class intelligence_layer.use_cases.search.search.SearchOutput(*, results: Sequence[SearchResult])[source]
+

Bases: BaseModel

+

The output of a Search task.

+
+
+results
+

Each result contains a text and a corresponding score.

+
+
Type:
+

Sequence[intelligence_layer.connectors.retrievers.base_retriever.SearchResult]

+
+
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model; should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'results': FieldInfo(annotation=Sequence[SearchResult], required=True)}
+

Metadata about the fields defined on the model: a mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+results: Sequence[SearchResult]
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/intelligence_layer.use_cases.summarize.html b/intelligence_layer.use_cases.summarize.html new file mode 100644 index 000000000..dec5a1005 --- /dev/null +++ b/intelligence_layer.use_cases.summarize.html @@ -0,0 +1,275 @@ + + + + + + + + intelligence_layer.use_cases.summarize package — Intelligence Layer documentation + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

intelligence_layer.use_cases.summarize package

+
+

Submodules

+
+
+

intelligence_layer.use_cases.summarize.summarize module

+
+
+class intelligence_layer.use_cases.summarize.summarize.ShortBodySummarize(client: Client, model: str = 'luminous-supreme-control')[source]
+

Bases: Task[SummarizeInput, SummarizeOutput]

+

Summarises a section into a short text.

+

Generates a short-body natural language summary. Will also return highlights explaining which parts of the input contributed strongly to the completion.

+
+

Note

+

The model provided should be a control-type model.

+
+
+
Parameters:

  • client – Aleph Alpha client instance for running model related API calls.
  • model – A valid Aleph Alpha model name.
+
+
+
+
+MAXIMUM_RESPONSE_TOKENS
+

The maximum number of tokens the summary will contain.

+
+ +
+
+INSTRUCTION
+

The verbal instruction sent to the model to make it generate the summary.

+
+ +

Example

+
>>> client = Client(os.getenv("AA_TOKEN"))
+>>> task = ShortBodySummarize(client)
+>>> input = SummarizeInput(
+>>>     chunk="This is a story about pizza. Tina hates pizza. However, Mike likes it. Pete strongly believes that pizza is the best thing to exist."
+>>> )
+>>> logger = InMemoryLogger(name="Short Body Summarize")
+>>> output = task.run(input, logger)
+>>> print(output.summary)
+Tina does not like pizza, but Mike and Pete do.
+
+
+
+
+INSTRUCTION = 'Summarize in just one or two sentences.'
+
+ +
+
+MAXIMUM_RESPONSE_TOKENS = 128
+
+ +
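The highlights make it possible to show which input passages back each summary. A minimal sketch, assuming a valid AA_TOKEN in the environment and a no-argument NoOpDebugLogger:

import os

from aleph_alpha_client import Client
from intelligence_layer.core.logger import NoOpDebugLogger
from intelligence_layer.use_cases.summarize.summarize import (
    ShortBodySummarize,
    SummarizeInput,
)

task = ShortBodySummarize(Client(os.getenv("AA_TOKEN")))
output = task.run(
    SummarizeInput(chunk="This is a story about pizza. Tina hates pizza. Mike likes it."),
    NoOpDebugLogger(),
)
print(output.summary)
for quote in output.highlights:
    print("supported by:", quote)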
+
+run(input: SummarizeInput, logger: DebugLogger) → SummarizeOutput[source]
+

Executes the process for this use-case.

+
+ +
+ +
+
+class intelligence_layer.use_cases.summarize.summarize.SummarizeInput(*, chunk: Chunk)[source]
+

Bases: BaseModel

+

The input for a Summarize task.

+
+
+chunk
+

The text chunk to be summarized.

+
+
Type:
+

intelligence_layer.core.task.Chunk

+
+
+
+ +
+
+chunk: Chunk
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model; should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'chunk': FieldInfo(annotation=NewType, required=True)}
+

Metadata about the fields defined on the model: a mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+ +
+
+class intelligence_layer.use_cases.summarize.summarize.SummarizeOutput(*, summary: str, highlights: Sequence[str])[source]
+

Bases: BaseModel

+

The output of a Summarize task.

+
+
+summary
+

The summary generated by the task.

+
+
Type:
+

str

+
+
+
+ +
+
+highlights
+

Highlights indicating which parts of the chunk contributed to the summary. Each highlight is a quote from the text.

+
+
Type:
+

Sequence[str]

+
+
+
+ +
+
+highlights: Sequence[str]
+
+ +
+
+model_config: ClassVar[ConfigDict] = {}
+

Configuration for the model; should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

+
+ +
+
+model_fields: ClassVar[dict[str, FieldInfo]] = {'highlights': FieldInfo(annotation=Sequence[str], required=True), 'summary': FieldInfo(annotation=str, required=True)}
+

Metadata about the fields defined on the model: a mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

+

This replaces Model.__fields__ from Pydantic V1.

+
+ +
+
+summary: str
+
+ +
+ +
+
+

Module contents

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/modules.html b/modules.html new file mode 100644 index 000000000..da1bf7703 --- /dev/null +++ b/modules.html @@ -0,0 +1,146 @@ + + + + + + + + src — Intelligence Layer documentation + + + + + + + + + + + + + + + + + + +
+
+ +
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/objects.inv b/objects.inv new file mode 100644 index 000000000..e1591cd36 Binary files /dev/null and b/objects.inv differ diff --git a/py-modindex.html b/py-modindex.html new file mode 100644 index 000000000..c48e40142 --- /dev/null +++ b/py-modindex.html @@ -0,0 +1,267 @@ + + + + + + + Python Module Index — Intelligence Layer documentation + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ + +

Python Module Index

  • intelligence_layer
      • intelligence_layer.connectors
      • intelligence_layer.connectors.document_index
      • intelligence_layer.connectors.retrievers
      • intelligence_layer.connectors.retrievers.base_retriever
      • intelligence_layer.connectors.retrievers.document_index_retriever
      • intelligence_layer.connectors.retrievers.in_memory_retriever
      • intelligence_layer.core
      • intelligence_layer.core.complete
      • intelligence_layer.core.echo
      • intelligence_layer.core.evaluator
      • intelligence_layer.core.explain
      • intelligence_layer.core.logger
      • intelligence_layer.core.prompt_template
      • intelligence_layer.core.task
      • intelligence_layer.core.text_highlight
      • intelligence_layer.use_cases
      • intelligence_layer.use_cases.classify
      • intelligence_layer.use_cases.classify.classify
      • intelligence_layer.use_cases.classify.embedding_based_classify
      • intelligence_layer.use_cases.classify.single_label_classify
      • intelligence_layer.use_cases.qa
      • intelligence_layer.use_cases.qa.long_context_qa
      • intelligence_layer.use_cases.qa.multiple_chunk_qa
      • intelligence_layer.use_cases.qa.retriever_based_qa
      • intelligence_layer.use_cases.qa.single_chunk_qa
      • intelligence_layer.use_cases.search
      • intelligence_layer.use_cases.search.filter_search
      • intelligence_layer.use_cases.search.search
      • intelligence_layer.use_cases.summarize
      • intelligence_layer.use_cases.summarize.summarize
+ +
+
+ +
+
intelligence_layer.core.complete)": [[4, "intelligence_layer.core.complete.InstructInput"]], "instructoutput (class in intelligence_layer.core.complete)": [[4, "intelligence_layer.core.complete.InstructOutput"]], "jsonserializer (class in intelligence_layer.core.logger)": [[4, "intelligence_layer.core.logger.JsonSerializer"]], "logentry (class in intelligence_layer.core.logger)": [[4, "intelligence_layer.core.logger.LogEntry"]], "logline (class in intelligence_layer.core.logger)": [[4, "intelligence_layer.core.logger.LogLine"]], "noopdebuglogger (class in intelligence_layer.core.logger)": [[4, "intelligence_layer.core.logger.NoOpDebugLogger"]], "nooptaskspan (class in intelligence_layer.core.logger)": [[4, "intelligence_layer.core.logger.NoOpTaskSpan"]], "output (class in intelligence_layer.core.task)": [[4, "intelligence_layer.core.task.Output"]], "prompt_template (intelligence_layer.core.echo.echotask attribute)": [[4, "intelligence_layer.core.echo.EchoTask.PROMPT_TEMPLATE"]], "plainentry (class in intelligence_layer.core.logger)": [[4, "intelligence_layer.core.logger.PlainEntry"]], "promptitemcursor (class in intelligence_layer.core.prompt_template)": [[4, "intelligence_layer.core.prompt_template.PromptItemCursor"]], "promptrange (class in intelligence_layer.core.prompt_template)": [[4, "intelligence_layer.core.prompt_template.PromptRange"]], "promptrangecontext (class in intelligence_layer.core.prompt_template)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext"]], "promptrangenode (class in intelligence_layer.core.prompt_template)": [[4, "intelligence_layer.core.prompt_template.PromptRangeNode"]], "promptrangetag (class in intelligence_layer.core.prompt_template)": [[4, "intelligence_layer.core.prompt_template.PromptRangeTag"]], "prompttemplate (class in intelligence_layer.core.prompt_template)": [[4, "intelligence_layer.core.prompt_template.PromptTemplate"]], "promptwithmetadata (class in intelligence_layer.core.prompt_template)": [[4, "intelligence_layer.core.prompt_template.PromptWithMetadata"]], "scoredtexthighlight (class in intelligence_layer.core.text_highlight)": [[4, "intelligence_layer.core.text_highlight.ScoredTextHighlight"]], "span (class in intelligence_layer.core.logger)": [[4, "intelligence_layer.core.logger.Span"]], "startspan (class in intelligence_layer.core.logger)": [[4, "intelligence_layer.core.logger.StartSpan"]], "starttask (class in intelligence_layer.core.logger)": [[4, "intelligence_layer.core.logger.StartTask"]], "task (class in intelligence_layer.core.task)": [[4, "intelligence_layer.core.task.Task"]], "taskspan (class in intelligence_layer.core.logger)": [[4, "intelligence_layer.core.logger.TaskSpan"]], "textcursor (class in intelligence_layer.core.prompt_template)": [[4, "intelligence_layer.core.prompt_template.TextCursor"]], "texthighlight (class in intelligence_layer.core.text_highlight)": [[4, "intelligence_layer.core.text_highlight.TextHighlight"]], "texthighlightinput (class in intelligence_layer.core.text_highlight)": [[4, "intelligence_layer.core.text_highlight.TextHighlightInput"]], "texthighlightoutput (class in intelligence_layer.core.text_highlight)": [[4, "intelligence_layer.core.text_highlight.TextHighlightOutput"]], "token (class in intelligence_layer.core.task)": [[4, "intelligence_layer.core.task.Token"]], "tokenwithprob (class in intelligence_layer.core.echo)": [[4, "intelligence_layer.core.echo.TokenWithProb"]], "add_placeholder_range() (intelligence_layer.core.prompt_template.promptrangecontext method)": [[4, 
"intelligence_layer.core.prompt_template.PromptRangeContext.add_placeholder_range"]], "aggregate() (intelligence_layer.core.evaluator.evaluator method)": [[4, "intelligence_layer.core.evaluator.Evaluator.aggregate"]], "autoescape (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.autoescape"]], "batched() (in module intelligence_layer.core.task)": [[4, "intelligence_layer.core.task.batched"]], "completion (intelligence_layer.core.complete.completeoutput property)": [[4, "intelligence_layer.core.complete.CompleteOutput.completion"]], "counters (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.counters"]], "disabled_tags (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.disabled_tags"]], "embed_prompt() (intelligence_layer.core.prompt_template.prompttemplate method)": [[4, "intelligence_layer.core.prompt_template.PromptTemplate.embed_prompt"]], "end (intelligence_layer.core.logger.endspan attribute)": [[4, "id25"], [4, "intelligence_layer.core.logger.EndSpan.end"]], "end (intelligence_layer.core.logger.endtask attribute)": [[4, "id27"], [4, "intelligence_layer.core.logger.EndTask.end"]], "end (intelligence_layer.core.prompt_template.promptrange attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRange.end"]], "end (intelligence_layer.core.prompt_template.promptrangetag attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeTag.end"]], "end_timestamp (intelligence_layer.core.logger.inmemoryspan attribute)": [[4, "intelligence_layer.core.logger.InMemorySpan.end_timestamp"]], "entry (intelligence_layer.core.logger.logline attribute)": [[4, "id37"], [4, "intelligence_layer.core.logger.LogLine.entry"]], "entry_type (intelligence_layer.core.logger.logline attribute)": [[4, "id38"], [4, "intelligence_layer.core.logger.LogLine.entry_type"]], "env (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.env"]], "evaluate() (intelligence_layer.core.evaluator.evaluator method)": [[4, "intelligence_layer.core.evaluator.Evaluator.evaluate"]], "evaluate_dataset() (intelligence_layer.core.evaluator.evaluator method)": [[4, "intelligence_layer.core.evaluator.Evaluator.evaluate_dataset"]], "examples (intelligence_layer.core.evaluator.dataset attribute)": [[4, "id15"], [4, "intelligence_layer.core.evaluator.Dataset.examples"]], "expected_completion (intelligence_layer.core.echo.echoinput attribute)": [[4, "id11"], [4, "intelligence_layer.core.echo.EchoInput.expected_completion"]], "expected_output (intelligence_layer.core.evaluator.example attribute)": [[4, "id17"], [4, "intelligence_layer.core.evaluator.Example.expected_output"]], "focus_ranges (intelligence_layer.core.text_highlight.texthighlightinput attribute)": [[4, "id60"], [4, "intelligence_layer.core.text_highlight.TextHighlightInput.focus_ranges"]], "globals (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.globals"]], "highlights (intelligence_layer.core.text_highlight.texthighlightoutput attribute)": [[4, "id64"], [4, "intelligence_layer.core.text_highlight.TextHighlightOutput.highlights"]], "ident (intelligence_layer.core.evaluator.example attribute)": [[4, "id18"], [4, 
"intelligence_layer.core.evaluator.Example.ident"]], "input (intelligence_layer.core.complete.instructinput attribute)": [[4, "id4"], [4, "intelligence_layer.core.complete.InstructInput.input"]], "input (intelligence_layer.core.evaluator.example attribute)": [[4, "id19"], [4, "intelligence_layer.core.evaluator.Example.input"]], "input (intelligence_layer.core.logger.inmemorytaskspan attribute)": [[4, "intelligence_layer.core.logger.InMemoryTaskSpan.input"]], "input (intelligence_layer.core.logger.starttask attribute)": [[4, "id47"], [4, "intelligence_layer.core.logger.StartTask.input"]], "instruction (intelligence_layer.core.complete.instructinput attribute)": [[4, "id5"], [4, "intelligence_layer.core.complete.InstructInput.instruction"]], "intelligence_layer.core": [[4, "module-intelligence_layer.core"]], "intelligence_layer.core.complete": [[4, "module-intelligence_layer.core.complete"]], "intelligence_layer.core.echo": [[4, "module-intelligence_layer.core.echo"]], "intelligence_layer.core.evaluator": [[4, "module-intelligence_layer.core.evaluator"]], "intelligence_layer.core.explain": [[4, "module-intelligence_layer.core.explain"]], "intelligence_layer.core.logger": [[4, "module-intelligence_layer.core.logger"]], "intelligence_layer.core.prompt_template": [[4, "module-intelligence_layer.core.prompt_template"]], "intelligence_layer.core.task": [[4, "module-intelligence_layer.core.task"]], "intelligence_layer.core.text_highlight": [[4, "module-intelligence_layer.core.text_highlight"]], "item (intelligence_layer.core.prompt_template.promptitemcursor attribute)": [[4, "intelligence_layer.core.prompt_template.PromptItemCursor.item"]], "item (intelligence_layer.core.prompt_template.textcursor attribute)": [[4, "intelligence_layer.core.prompt_template.TextCursor.item"]], "local_namespace_size_carry (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.local_namespace_size_carry"]], "locals (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.locals"]], "log() (intelligence_layer.core.logger.debuglogger method)": [[4, "intelligence_layer.core.logger.DebugLogger.log"]], "log() (intelligence_layer.core.logger.filedebuglogger method)": [[4, "intelligence_layer.core.logger.FileDebugLogger.log"]], "log() (intelligence_layer.core.logger.inmemorydebuglogger method)": [[4, "intelligence_layer.core.logger.InMemoryDebugLogger.log"]], "log() (intelligence_layer.core.logger.noopdebuglogger method)": [[4, "intelligence_layer.core.logger.NoOpDebugLogger.log"]], "logs (intelligence_layer.core.logger.inmemorydebuglogger attribute)": [[4, "id32"], [4, "intelligence_layer.core.logger.InMemoryDebugLogger.logs"]], "loop_iteration_carry (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.loop_iteration_carry"]], "loops (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.loops"]], "maximum_response_tokens (intelligence_layer.core.complete.instructinput attribute)": [[4, "id6"], [4, "intelligence_layer.core.complete.InstructInput.maximum_response_tokens"]], "message (intelligence_layer.core.logger.logentry attribute)": [[4, "id34"], [4, "intelligence_layer.core.logger.LogEntry.message"]], "message (intelligence_layer.core.logger.plainentry attribute)": [[4, "id39"], [4, 
"intelligence_layer.core.logger.PlainEntry.message"]], "model (intelligence_layer.core.complete.completeinput attribute)": [[4, "id0"], [4, "intelligence_layer.core.complete.CompleteInput.model"]], "model (intelligence_layer.core.complete.instructinput attribute)": [[4, "id7"], [4, "intelligence_layer.core.complete.InstructInput.model"]], "model (intelligence_layer.core.echo.echoinput attribute)": [[4, "id12"], [4, "intelligence_layer.core.echo.EchoInput.model"]], "model (intelligence_layer.core.explain.explaininput attribute)": [[4, "id20"], [4, "intelligence_layer.core.explain.ExplainInput.model"]], "model (intelligence_layer.core.text_highlight.texthighlightinput attribute)": [[4, "id61"], [4, "intelligence_layer.core.text_highlight.TextHighlightInput.model"]], "model_config (intelligence_layer.core.complete.completeinput attribute)": [[4, "intelligence_layer.core.complete.CompleteInput.model_config"]], "model_config (intelligence_layer.core.complete.completeoutput attribute)": [[4, "intelligence_layer.core.complete.CompleteOutput.model_config"]], "model_config (intelligence_layer.core.complete.instructinput attribute)": [[4, "intelligence_layer.core.complete.InstructInput.model_config"]], "model_config (intelligence_layer.core.complete.instructoutput attribute)": [[4, "intelligence_layer.core.complete.InstructOutput.model_config"]], "model_config (intelligence_layer.core.echo.echoinput attribute)": [[4, "intelligence_layer.core.echo.EchoInput.model_config"]], "model_config (intelligence_layer.core.echo.echooutput attribute)": [[4, "intelligence_layer.core.echo.EchoOutput.model_config"]], "model_config (intelligence_layer.core.echo.tokenwithprob attribute)": [[4, "intelligence_layer.core.echo.TokenWithProb.model_config"]], "model_config (intelligence_layer.core.evaluator.dataset attribute)": [[4, "intelligence_layer.core.evaluator.Dataset.model_config"]], "model_config (intelligence_layer.core.evaluator.example attribute)": [[4, "intelligence_layer.core.evaluator.Example.model_config"]], "model_config (intelligence_layer.core.explain.explaininput attribute)": [[4, "intelligence_layer.core.explain.ExplainInput.model_config"]], "model_config (intelligence_layer.core.explain.explainoutput attribute)": [[4, "intelligence_layer.core.explain.ExplainOutput.model_config"]], "model_config (intelligence_layer.core.logger.endspan attribute)": [[4, "intelligence_layer.core.logger.EndSpan.model_config"]], "model_config (intelligence_layer.core.logger.endtask attribute)": [[4, "intelligence_layer.core.logger.EndTask.model_config"]], "model_config (intelligence_layer.core.logger.inmemorydebuglogger attribute)": [[4, "intelligence_layer.core.logger.InMemoryDebugLogger.model_config"]], "model_config (intelligence_layer.core.logger.inmemoryspan attribute)": [[4, "intelligence_layer.core.logger.InMemorySpan.model_config"]], "model_config (intelligence_layer.core.logger.inmemorytaskspan attribute)": [[4, "intelligence_layer.core.logger.InMemoryTaskSpan.model_config"]], "model_config (intelligence_layer.core.logger.jsonserializer attribute)": [[4, "intelligence_layer.core.logger.JsonSerializer.model_config"]], "model_config (intelligence_layer.core.logger.logentry attribute)": [[4, "intelligence_layer.core.logger.LogEntry.model_config"]], "model_config (intelligence_layer.core.logger.logline attribute)": [[4, "intelligence_layer.core.logger.LogLine.model_config"]], "model_config (intelligence_layer.core.logger.plainentry attribute)": [[4, "intelligence_layer.core.logger.PlainEntry.model_config"]], 
"model_config (intelligence_layer.core.logger.startspan attribute)": [[4, "intelligence_layer.core.logger.StartSpan.model_config"]], "model_config (intelligence_layer.core.logger.starttask attribute)": [[4, "intelligence_layer.core.logger.StartTask.model_config"]], "model_config (intelligence_layer.core.task.token attribute)": [[4, "intelligence_layer.core.task.Token.model_config"]], "model_config (intelligence_layer.core.text_highlight.scoredtexthighlight attribute)": [[4, "intelligence_layer.core.text_highlight.ScoredTextHighlight.model_config"]], "model_config (intelligence_layer.core.text_highlight.texthighlightinput attribute)": [[4, "intelligence_layer.core.text_highlight.TextHighlightInput.model_config"]], "model_config (intelligence_layer.core.text_highlight.texthighlightoutput attribute)": [[4, "intelligence_layer.core.text_highlight.TextHighlightOutput.model_config"]], "model_fields (intelligence_layer.core.complete.completeinput attribute)": [[4, "intelligence_layer.core.complete.CompleteInput.model_fields"]], "model_fields (intelligence_layer.core.complete.completeoutput attribute)": [[4, "intelligence_layer.core.complete.CompleteOutput.model_fields"]], "model_fields (intelligence_layer.core.complete.instructinput attribute)": [[4, "intelligence_layer.core.complete.InstructInput.model_fields"]], "model_fields (intelligence_layer.core.complete.instructoutput attribute)": [[4, "intelligence_layer.core.complete.InstructOutput.model_fields"]], "model_fields (intelligence_layer.core.echo.echoinput attribute)": [[4, "intelligence_layer.core.echo.EchoInput.model_fields"]], "model_fields (intelligence_layer.core.echo.echooutput attribute)": [[4, "intelligence_layer.core.echo.EchoOutput.model_fields"]], "model_fields (intelligence_layer.core.echo.tokenwithprob attribute)": [[4, "intelligence_layer.core.echo.TokenWithProb.model_fields"]], "model_fields (intelligence_layer.core.evaluator.dataset attribute)": [[4, "intelligence_layer.core.evaluator.Dataset.model_fields"]], "model_fields (intelligence_layer.core.evaluator.example attribute)": [[4, "intelligence_layer.core.evaluator.Example.model_fields"]], "model_fields (intelligence_layer.core.explain.explaininput attribute)": [[4, "intelligence_layer.core.explain.ExplainInput.model_fields"]], "model_fields (intelligence_layer.core.explain.explainoutput attribute)": [[4, "intelligence_layer.core.explain.ExplainOutput.model_fields"]], "model_fields (intelligence_layer.core.logger.endspan attribute)": [[4, "intelligence_layer.core.logger.EndSpan.model_fields"]], "model_fields (intelligence_layer.core.logger.endtask attribute)": [[4, "intelligence_layer.core.logger.EndTask.model_fields"]], "model_fields (intelligence_layer.core.logger.inmemorydebuglogger attribute)": [[4, "intelligence_layer.core.logger.InMemoryDebugLogger.model_fields"]], "model_fields (intelligence_layer.core.logger.inmemoryspan attribute)": [[4, "intelligence_layer.core.logger.InMemorySpan.model_fields"]], "model_fields (intelligence_layer.core.logger.inmemorytaskspan attribute)": [[4, "intelligence_layer.core.logger.InMemoryTaskSpan.model_fields"]], "model_fields (intelligence_layer.core.logger.jsonserializer attribute)": [[4, "intelligence_layer.core.logger.JsonSerializer.model_fields"]], "model_fields (intelligence_layer.core.logger.logentry attribute)": [[4, "intelligence_layer.core.logger.LogEntry.model_fields"]], "model_fields (intelligence_layer.core.logger.logline attribute)": [[4, "intelligence_layer.core.logger.LogLine.model_fields"]], "model_fields 
(intelligence_layer.core.logger.plainentry attribute)": [[4, "intelligence_layer.core.logger.PlainEntry.model_fields"]], "model_fields (intelligence_layer.core.logger.startspan attribute)": [[4, "intelligence_layer.core.logger.StartSpan.model_fields"]], "model_fields (intelligence_layer.core.logger.starttask attribute)": [[4, "intelligence_layer.core.logger.StartTask.model_fields"]], "model_fields (intelligence_layer.core.task.token attribute)": [[4, "intelligence_layer.core.task.Token.model_fields"]], "model_fields (intelligence_layer.core.text_highlight.scoredtexthighlight attribute)": [[4, "intelligence_layer.core.text_highlight.ScoredTextHighlight.model_fields"]], "model_fields (intelligence_layer.core.text_highlight.texthighlightinput attribute)": [[4, "intelligence_layer.core.text_highlight.TextHighlightInput.model_fields"]], "model_fields (intelligence_layer.core.text_highlight.texthighlightoutput attribute)": [[4, "intelligence_layer.core.text_highlight.TextHighlightOutput.model_fields"]], "name (intelligence_layer.core.evaluator.dataset attribute)": [[4, "id16"], [4, "intelligence_layer.core.evaluator.Dataset.name"]], "name (intelligence_layer.core.logger.inmemorydebuglogger attribute)": [[4, "id33"], [4, "intelligence_layer.core.logger.InMemoryDebugLogger.name"]], "name (intelligence_layer.core.logger.startspan attribute)": [[4, "id43"], [4, "intelligence_layer.core.logger.StartSpan.name"]], "name (intelligence_layer.core.logger.starttask attribute)": [[4, "id48"], [4, "intelligence_layer.core.logger.StartTask.name"]], "name (intelligence_layer.core.prompt_template.promptrangetag attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeTag.name"]], "output (intelligence_layer.core.logger.endtask attribute)": [[4, "id28"], [4, "intelligence_layer.core.logger.EndTask.output"]], "output (intelligence_layer.core.logger.filetaskspan attribute)": [[4, "intelligence_layer.core.logger.FileTaskSpan.output"]], "output (intelligence_layer.core.logger.inmemorytaskspan attribute)": [[4, "intelligence_layer.core.logger.InMemoryTaskSpan.output"]], "parent (intelligence_layer.core.logger.plainentry attribute)": [[4, "id40"], [4, "intelligence_layer.core.logger.PlainEntry.parent"]], "parent (intelligence_layer.core.logger.startspan attribute)": [[4, "id44"], [4, "intelligence_layer.core.logger.StartSpan.parent"]], "parent (intelligence_layer.core.logger.starttask attribute)": [[4, "id49"], [4, "intelligence_layer.core.logger.StartTask.parent"]], "parent_context (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.parent_context"]], "parse() (intelligence_layer.core.prompt_template.promptrangetag method)": [[4, "intelligence_layer.core.prompt_template.PromptRangeTag.parse"]], "placeholder() (intelligence_layer.core.prompt_template.prompttemplate method)": [[4, "intelligence_layer.core.prompt_template.PromptTemplate.placeholder"]], "placeholder_range_names() (intelligence_layer.core.prompt_template.promptrangecontext method)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.placeholder_range_names"]], "position (intelligence_layer.core.prompt_template.textcursor attribute)": [[4, "intelligence_layer.core.prompt_template.TextCursor.position"]], "prob (intelligence_layer.core.echo.tokenwithprob attribute)": [[4, "intelligence_layer.core.echo.TokenWithProb.prob"]], "prompt (intelligence_layer.core.echo.echoinput attribute)": [[4, "id13"], [4, 
"intelligence_layer.core.echo.EchoInput.prompt"]], "prompt (intelligence_layer.core.prompt_template.promptwithmetadata attribute)": [[4, "intelligence_layer.core.prompt_template.PromptWithMetadata.prompt"]], "prompt_with_metadata (intelligence_layer.core.complete.instructoutput attribute)": [[4, "id9"], [4, "intelligence_layer.core.complete.InstructOutput.prompt_with_metadata"]], "prompt_with_metadata (intelligence_layer.core.text_highlight.texthighlightinput attribute)": [[4, "id62"], [4, "intelligence_layer.core.text_highlight.TextHighlightInput.prompt_with_metadata"]], "ranges (intelligence_layer.core.prompt_template.promptwithmetadata attribute)": [[4, "intelligence_layer.core.prompt_template.PromptWithMetadata.ranges"]], "record_output() (intelligence_layer.core.logger.filetaskspan method)": [[4, "intelligence_layer.core.logger.FileTaskSpan.record_output"]], "record_output() (intelligence_layer.core.logger.inmemorytaskspan method)": [[4, "intelligence_layer.core.logger.InMemoryTaskSpan.record_output"]], "record_output() (intelligence_layer.core.logger.nooptaskspan method)": [[4, "intelligence_layer.core.logger.NoOpTaskSpan.record_output"]], "record_output() (intelligence_layer.core.logger.taskspan method)": [[4, "intelligence_layer.core.logger.TaskSpan.record_output"]], "render_to_output() (intelligence_layer.core.prompt_template.promptrangenode method)": [[4, "intelligence_layer.core.prompt_template.PromptRangeNode.render_to_output"]], "request (intelligence_layer.core.complete.completeinput attribute)": [[4, "id1"], [4, "intelligence_layer.core.complete.CompleteInput.request"]], "request (intelligence_layer.core.explain.explaininput attribute)": [[4, "id21"], [4, "intelligence_layer.core.explain.ExplainInput.request"]], "response (intelligence_layer.core.complete.completeoutput attribute)": [[4, "id2"], [4, "intelligence_layer.core.complete.CompleteOutput.response"]], "response (intelligence_layer.core.complete.instructoutput attribute)": [[4, "id10"], [4, "intelligence_layer.core.complete.InstructOutput.response"]], "response (intelligence_layer.core.explain.explainoutput attribute)": [[4, "id22"], [4, "intelligence_layer.core.explain.ExplainOutput.response"]], "response_prefix (intelligence_layer.core.complete.instructinput attribute)": [[4, "id8"], [4, "intelligence_layer.core.complete.InstructInput.response_prefix"]], "root (intelligence_layer.core.logger.jsonserializer attribute)": [[4, "intelligence_layer.core.logger.JsonSerializer.root"]], "run() (intelligence_layer.core.complete.complete method)": [[4, "intelligence_layer.core.complete.Complete.run"]], "run() (intelligence_layer.core.complete.instruct method)": [[4, "intelligence_layer.core.complete.Instruct.run"]], "run() (intelligence_layer.core.echo.echotask method)": [[4, "intelligence_layer.core.echo.EchoTask.run"]], "run() (intelligence_layer.core.explain.explain method)": [[4, "intelligence_layer.core.explain.Explain.run"]], "run() (intelligence_layer.core.task.task method)": [[4, "intelligence_layer.core.task.Task.run"]], "run() (intelligence_layer.core.text_highlight.texthighlight method)": [[4, "intelligence_layer.core.text_highlight.TextHighlight.run"]], "run_concurrently() (intelligence_layer.core.task.task method)": [[4, "intelligence_layer.core.task.Task.run_concurrently"]], "scope (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.scope"]], "score (intelligence_layer.core.text_highlight.scoredtexthighlight attribute)": [[4, 
"id58"], [4, "intelligence_layer.core.text_highlight.ScoredTextHighlight.score"]], "span() (intelligence_layer.core.logger.debuglogger method)": [[4, "intelligence_layer.core.logger.DebugLogger.span"]], "span() (intelligence_layer.core.logger.filedebuglogger method)": [[4, "intelligence_layer.core.logger.FileDebugLogger.span"]], "span() (intelligence_layer.core.logger.inmemorydebuglogger method)": [[4, "intelligence_layer.core.logger.InMemoryDebugLogger.span"]], "span() (intelligence_layer.core.logger.noopdebuglogger method)": [[4, "intelligence_layer.core.logger.NoOpDebugLogger.span"]], "start (intelligence_layer.core.logger.startspan attribute)": [[4, "id45"], [4, "intelligence_layer.core.logger.StartSpan.start"]], "start (intelligence_layer.core.logger.starttask attribute)": [[4, "id50"], [4, "intelligence_layer.core.logger.StartTask.start"]], "start (intelligence_layer.core.prompt_template.promptrange attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRange.start"]], "start_timestamp (intelligence_layer.core.logger.inmemoryspan attribute)": [[4, "intelligence_layer.core.logger.InMemorySpan.start_timestamp"]], "tag_namespace (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.tag_namespace"]], "target (intelligence_layer.core.text_highlight.texthighlightinput attribute)": [[4, "id63"], [4, "intelligence_layer.core.text_highlight.TextHighlightInput.target"]], "task_span() (intelligence_layer.core.logger.debuglogger method)": [[4, "intelligence_layer.core.logger.DebugLogger.task_span"]], "task_span() (intelligence_layer.core.logger.filedebuglogger method)": [[4, "intelligence_layer.core.logger.FileDebugLogger.task_span"]], "task_span() (intelligence_layer.core.logger.inmemorydebuglogger method)": [[4, "intelligence_layer.core.logger.InMemoryDebugLogger.task_span"]], "task_span() (intelligence_layer.core.logger.noopdebuglogger method)": [[4, "intelligence_layer.core.logger.NoOpDebugLogger.task_span"]], "template (intelligence_layer.core.prompt_template.promptrangecontext attribute)": [[4, "intelligence_layer.core.prompt_template.PromptRangeContext.template"]], "text (intelligence_layer.core.text_highlight.scoredtexthighlight attribute)": [[4, "id59"], [4, "intelligence_layer.core.text_highlight.ScoredTextHighlight.text"]], "timestamp (intelligence_layer.core.logger.logentry attribute)": [[4, "id35"], [4, "intelligence_layer.core.logger.LogEntry.timestamp"]], "timestamp (intelligence_layer.core.logger.plainentry attribute)": [[4, "id41"], [4, "intelligence_layer.core.logger.PlainEntry.timestamp"]], "to_prompt() (intelligence_layer.core.prompt_template.prompttemplate method)": [[4, "intelligence_layer.core.prompt_template.PromptTemplate.to_prompt"]], "to_prompt_with_metadata() (intelligence_layer.core.prompt_template.prompttemplate method)": [[4, "intelligence_layer.core.prompt_template.PromptTemplate.to_prompt_with_metadata"]], "token (intelligence_layer.core.echo.tokenwithprob attribute)": [[4, "intelligence_layer.core.echo.TokenWithProb.token"]], "token (intelligence_layer.core.task.token attribute)": [[4, "intelligence_layer.core.task.Token.token"]], "token_id (intelligence_layer.core.task.token attribute)": [[4, "intelligence_layer.core.task.Token.token_id"]], "tokens_with_log_probs (intelligence_layer.core.echo.echooutput attribute)": [[4, "id14"], [4, "intelligence_layer.core.echo.EchoOutput.tokens_with_log_probs"]], "uuid (intelligence_layer.core.logger.endspan attribute)": [[4, 
"id26"], [4, "intelligence_layer.core.logger.EndSpan.uuid"]], "uuid (intelligence_layer.core.logger.endtask attribute)": [[4, "id29"], [4, "intelligence_layer.core.logger.EndTask.uuid"]], "uuid (intelligence_layer.core.logger.filedebuglogger attribute)": [[4, "intelligence_layer.core.logger.FileDebugLogger.uuid"]], "uuid (intelligence_layer.core.logger.startspan attribute)": [[4, "id46"], [4, "intelligence_layer.core.logger.StartSpan.uuid"]], "uuid (intelligence_layer.core.logger.starttask attribute)": [[4, "id51"], [4, "intelligence_layer.core.logger.StartTask.uuid"]], "value (intelligence_layer.core.logger.logentry attribute)": [[4, "id36"], [4, "intelligence_layer.core.logger.LogEntry.value"]], "value (intelligence_layer.core.logger.plainentry attribute)": [[4, "id42"], [4, "intelligence_layer.core.logger.PlainEntry.value"]], "intelligence_layer.use_cases": [[5, "module-intelligence_layer.use_cases"]], "aggregatedclassifyevaluation (class in intelligence_layer.use_cases.classify.classify)": [[6, "intelligence_layer.use_cases.classify.classify.AggregatedClassifyEvaluation"]], "classify (class in intelligence_layer.use_cases.classify.classify)": [[6, "intelligence_layer.use_cases.classify.classify.Classify"]], "classifyevaluation (class in intelligence_layer.use_cases.classify.classify)": [[6, "intelligence_layer.use_cases.classify.classify.ClassifyEvaluation"]], "classifyevaluator (class in intelligence_layer.use_cases.classify.classify)": [[6, "intelligence_layer.use_cases.classify.classify.ClassifyEvaluator"]], "classifyinput (class in intelligence_layer.use_cases.classify.classify)": [[6, "intelligence_layer.use_cases.classify.classify.ClassifyInput"]], "classifyoutput (class in intelligence_layer.use_cases.classify.classify)": [[6, "intelligence_layer.use_cases.classify.classify.ClassifyOutput"]], "embeddingbasedclassify (class in intelligence_layer.use_cases.classify.embedding_based_classify)": [[6, "intelligence_layer.use_cases.classify.embedding_based_classify.EmbeddingBasedClassify"]], "embeddingbasedclassifyscoring (class in intelligence_layer.use_cases.classify.embedding_based_classify)": [[6, "intelligence_layer.use_cases.classify.embedding_based_classify.EmbeddingBasedClassifyScoring"]], "labelwithexamples (class in intelligence_layer.use_cases.classify.embedding_based_classify)": [[6, "intelligence_layer.use_cases.classify.embedding_based_classify.LabelWithExamples"]], "max (intelligence_layer.use_cases.classify.embedding_based_classify.embeddingbasedclassifyscoring attribute)": [[6, "id7"], [6, "intelligence_layer.use_cases.classify.embedding_based_classify.EmbeddingBasedClassifyScoring.MAX"]], "mean_top_5 (intelligence_layer.use_cases.classify.embedding_based_classify.embeddingbasedclassifyscoring attribute)": [[6, "id8"], [6, "intelligence_layer.use_cases.classify.embedding_based_classify.EmbeddingBasedClassifyScoring.MEAN_TOP_5"]], "metadata_label_name (intelligence_layer.use_cases.classify.embedding_based_classify.embeddingbasedclassify attribute)": [[6, "id6"], [6, "intelligence_layer.use_cases.classify.embedding_based_classify.EmbeddingBasedClassify.METADATA_LABEL_NAME"]], "model (intelligence_layer.use_cases.classify.single_label_classify.singlelabelclassify attribute)": [[6, "id11"], [6, "intelligence_layer.use_cases.classify.single_label_classify.SingleLabelClassify.MODEL"]], "prompt_template (intelligence_layer.use_cases.classify.single_label_classify.singlelabelclassify attribute)": [[6, 
"intelligence_layer.use_cases.classify.single_label_classify.SingleLabelClassify.PROMPT_TEMPLATE"]], "prompt_template_str (intelligence_layer.use_cases.classify.single_label_classify.singlelabelclassify attribute)": [[6, "intelligence_layer.use_cases.classify.single_label_classify.SingleLabelClassify.PROMPT_TEMPLATE_STR"]], "singlelabelclassify (class in intelligence_layer.use_cases.classify.single_label_classify)": [[6, "intelligence_layer.use_cases.classify.single_label_classify.SingleLabelClassify"]], "treenode (class in intelligence_layer.use_cases.classify.single_label_classify)": [[6, "intelligence_layer.use_cases.classify.single_label_classify.TreeNode"]], "aggregate() (intelligence_layer.use_cases.classify.classify.classifyevaluator method)": [[6, "intelligence_layer.use_cases.classify.classify.ClassifyEvaluator.aggregate"]], "chunk (intelligence_layer.use_cases.classify.classify.classifyinput attribute)": [[6, "id3"], [6, "intelligence_layer.use_cases.classify.classify.ClassifyInput.chunk"]], "correct (intelligence_layer.use_cases.classify.classify.classifyevaluation attribute)": [[6, "id1"], [6, "intelligence_layer.use_cases.classify.classify.ClassifyEvaluation.correct"]], "evaluate() (intelligence_layer.use_cases.classify.classify.classifyevaluator method)": [[6, "intelligence_layer.use_cases.classify.classify.ClassifyEvaluator.evaluate"]], "evaluation (intelligence_layer.use_cases.classify.classify.aggregatedclassifyevaluation attribute)": [[6, "intelligence_layer.use_cases.classify.classify.AggregatedClassifyEvaluation.evaluation"]], "evaluations (intelligence_layer.use_cases.classify.classify.aggregatedclassifyevaluation attribute)": [[6, "intelligence_layer.use_cases.classify.classify.AggregatedClassifyEvaluation.evaluations"]], "examples (intelligence_layer.use_cases.classify.embedding_based_classify.labelwithexamples attribute)": [[6, "id9"], [6, "intelligence_layer.use_cases.classify.embedding_based_classify.LabelWithExamples.examples"]], "find_child() (intelligence_layer.use_cases.classify.single_label_classify.treenode method)": [[6, "intelligence_layer.use_cases.classify.single_label_classify.TreeNode.find_child"]], "insert_path() (intelligence_layer.use_cases.classify.single_label_classify.treenode method)": [[6, "intelligence_layer.use_cases.classify.single_label_classify.TreeNode.insert_path"]], "insert_without_calculation() (intelligence_layer.use_cases.classify.single_label_classify.treenode method)": [[6, "intelligence_layer.use_cases.classify.single_label_classify.TreeNode.insert_without_calculation"]], "intelligence_layer.use_cases.classify": [[6, "module-intelligence_layer.use_cases.classify"]], "intelligence_layer.use_cases.classify.classify": [[6, "module-intelligence_layer.use_cases.classify.classify"]], "intelligence_layer.use_cases.classify.embedding_based_classify": [[6, "module-intelligence_layer.use_cases.classify.embedding_based_classify"]], "intelligence_layer.use_cases.classify.single_label_classify": [[6, "module-intelligence_layer.use_cases.classify.single_label_classify"]], "labels (intelligence_layer.use_cases.classify.classify.classifyinput attribute)": [[6, "id4"], [6, "intelligence_layer.use_cases.classify.classify.ClassifyInput.labels"]], "model_config (intelligence_layer.use_cases.classify.classify.aggregatedclassifyevaluation attribute)": [[6, "intelligence_layer.use_cases.classify.classify.AggregatedClassifyEvaluation.model_config"]], "model_config (intelligence_layer.use_cases.classify.classify.classifyevaluation attribute)": [[6, 
"intelligence_layer.use_cases.classify.classify.ClassifyEvaluation.model_config"]], "model_config (intelligence_layer.use_cases.classify.classify.classifyinput attribute)": [[6, "intelligence_layer.use_cases.classify.classify.ClassifyInput.model_config"]], "model_config (intelligence_layer.use_cases.classify.classify.classifyoutput attribute)": [[6, "intelligence_layer.use_cases.classify.classify.ClassifyOutput.model_config"]], "model_config (intelligence_layer.use_cases.classify.embedding_based_classify.labelwithexamples attribute)": [[6, "intelligence_layer.use_cases.classify.embedding_based_classify.LabelWithExamples.model_config"]], "model_fields (intelligence_layer.use_cases.classify.classify.aggregatedclassifyevaluation attribute)": [[6, "intelligence_layer.use_cases.classify.classify.AggregatedClassifyEvaluation.model_fields"]], "model_fields (intelligence_layer.use_cases.classify.classify.classifyevaluation attribute)": [[6, "intelligence_layer.use_cases.classify.classify.ClassifyEvaluation.model_fields"]], "model_fields (intelligence_layer.use_cases.classify.classify.classifyinput attribute)": [[6, "intelligence_layer.use_cases.classify.classify.ClassifyInput.model_fields"]], "model_fields (intelligence_layer.use_cases.classify.classify.classifyoutput attribute)": [[6, "intelligence_layer.use_cases.classify.classify.ClassifyOutput.model_fields"]], "model_fields (intelligence_layer.use_cases.classify.embedding_based_classify.labelwithexamples attribute)": [[6, "intelligence_layer.use_cases.classify.embedding_based_classify.LabelWithExamples.model_fields"]], "name (intelligence_layer.use_cases.classify.embedding_based_classify.labelwithexamples attribute)": [[6, "id10"], [6, "intelligence_layer.use_cases.classify.embedding_based_classify.LabelWithExamples.name"]], "normalize_probs() (intelligence_layer.use_cases.classify.single_label_classify.treenode method)": [[6, "intelligence_layer.use_cases.classify.single_label_classify.TreeNode.normalize_probs"]], "output (intelligence_layer.use_cases.classify.classify.classifyevaluation attribute)": [[6, "id2"], [6, "intelligence_layer.use_cases.classify.classify.ClassifyEvaluation.output"]], "path() (intelligence_layer.use_cases.classify.single_label_classify.treenode method)": [[6, "intelligence_layer.use_cases.classify.single_label_classify.TreeNode.path"]], "percentage_correct (intelligence_layer.use_cases.classify.classify.aggregatedclassifyevaluation attribute)": [[6, "id0"], [6, "intelligence_layer.use_cases.classify.classify.AggregatedClassifyEvaluation.percentage_correct"]], "run() (intelligence_layer.use_cases.classify.classify.classify method)": [[6, "intelligence_layer.use_cases.classify.classify.Classify.run"]], "run() (intelligence_layer.use_cases.classify.embedding_based_classify.embeddingbasedclassify method)": [[6, "intelligence_layer.use_cases.classify.embedding_based_classify.EmbeddingBasedClassify.run"]], "run() (intelligence_layer.use_cases.classify.single_label_classify.singlelabelclassify method)": [[6, "intelligence_layer.use_cases.classify.single_label_classify.SingleLabelClassify.run"]], "scores (intelligence_layer.use_cases.classify.classify.classifyoutput attribute)": [[6, "id5"], [6, "intelligence_layer.use_cases.classify.classify.ClassifyOutput.scores"]], "to_aa_tokens_prompt() (in module intelligence_layer.use_cases.classify.single_label_classify)": [[6, "intelligence_layer.use_cases.classify.single_label_classify.to_aa_tokens_prompt"]], "longcontextqa (class in 
intelligence_layer.use_cases.qa.long_context_qa)": [[7, "intelligence_layer.use_cases.qa.long_context_qa.LongContextQa"]], "longcontextqainput (class in intelligence_layer.use_cases.qa.long_context_qa)": [[7, "intelligence_layer.use_cases.qa.long_context_qa.LongContextQaInput"]], "merge_answers_instruction (intelligence_layer.use_cases.qa.multiple_chunk_qa.multiplechunkqa attribute)": [[7, "id2"], [7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQa.MERGE_ANSWERS_INSTRUCTION"]], "multiplechunkqa (class in intelligence_layer.use_cases.qa.multiple_chunk_qa)": [[7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQa"]], "multiplechunkqainput (class in intelligence_layer.use_cases.qa.multiple_chunk_qa)": [[7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaInput"]], "multiplechunkqaoutput (class in intelligence_layer.use_cases.qa.multiple_chunk_qa)": [[7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaOutput"]], "no_answer_str (intelligence_layer.use_cases.qa.single_chunk_qa.singlechunkqa attribute)": [[7, "id11"], [7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQa.NO_ANSWER_STR"]], "prompt_template_str (intelligence_layer.use_cases.qa.single_chunk_qa.singlechunkqa attribute)": [[7, "id12"], [7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQa.PROMPT_TEMPLATE_STR"]], "retrieverbasedqa (class in intelligence_layer.use_cases.qa.retriever_based_qa)": [[7, "intelligence_layer.use_cases.qa.retriever_based_qa.RetrieverBasedQa"]], "retrieverbasedqainput (class in intelligence_layer.use_cases.qa.retriever_based_qa)": [[7, "intelligence_layer.use_cases.qa.retriever_based_qa.RetrieverBasedQaInput"]], "singlechunkqa (class in intelligence_layer.use_cases.qa.single_chunk_qa)": [[7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQa"]], "singlechunkqainput (class in intelligence_layer.use_cases.qa.single_chunk_qa)": [[7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaInput"]], "singlechunkqaoutput (class in intelligence_layer.use_cases.qa.single_chunk_qa)": [[7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaOutput"]], "subanswer (class in intelligence_layer.use_cases.qa.multiple_chunk_qa)": [[7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.Subanswer"]], "answer (intelligence_layer.use_cases.qa.multiple_chunk_qa.multiplechunkqaoutput attribute)": [[7, "id5"], [7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaOutput.answer"]], "answer (intelligence_layer.use_cases.qa.multiple_chunk_qa.subanswer attribute)": [[7, "id7"], [7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.Subanswer.answer"]], "answer (intelligence_layer.use_cases.qa.single_chunk_qa.singlechunkqaoutput attribute)": [[7, "id15"], [7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaOutput.answer"]], "chunk (intelligence_layer.use_cases.qa.multiple_chunk_qa.subanswer attribute)": [[7, "id8"], [7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.Subanswer.chunk"]], "chunk (intelligence_layer.use_cases.qa.single_chunk_qa.singlechunkqainput attribute)": [[7, "id13"], [7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaInput.chunk"]], "chunks (intelligence_layer.use_cases.qa.multiple_chunk_qa.multiplechunkqainput attribute)": [[7, "id3"], [7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaInput.chunks"]], "highlights (intelligence_layer.use_cases.qa.multiple_chunk_qa.subanswer attribute)": [[7, "id9"], [7, 
"intelligence_layer.use_cases.qa.multiple_chunk_qa.Subanswer.highlights"]], "highlights (intelligence_layer.use_cases.qa.single_chunk_qa.singlechunkqaoutput attribute)": [[7, "id16"], [7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaOutput.highlights"]], "intelligence_layer.use_cases.qa": [[7, "module-intelligence_layer.use_cases.qa"]], "intelligence_layer.use_cases.qa.long_context_qa": [[7, "module-intelligence_layer.use_cases.qa.long_context_qa"]], "intelligence_layer.use_cases.qa.multiple_chunk_qa": [[7, "module-intelligence_layer.use_cases.qa.multiple_chunk_qa"]], "intelligence_layer.use_cases.qa.retriever_based_qa": [[7, "module-intelligence_layer.use_cases.qa.retriever_based_qa"]], "intelligence_layer.use_cases.qa.single_chunk_qa": [[7, "module-intelligence_layer.use_cases.qa.single_chunk_qa"]], "model_config (intelligence_layer.use_cases.qa.long_context_qa.longcontextqainput attribute)": [[7, "intelligence_layer.use_cases.qa.long_context_qa.LongContextQaInput.model_config"]], "model_config (intelligence_layer.use_cases.qa.multiple_chunk_qa.multiplechunkqainput attribute)": [[7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaInput.model_config"]], "model_config (intelligence_layer.use_cases.qa.multiple_chunk_qa.multiplechunkqaoutput attribute)": [[7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaOutput.model_config"]], "model_config (intelligence_layer.use_cases.qa.multiple_chunk_qa.subanswer attribute)": [[7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.Subanswer.model_config"]], "model_config (intelligence_layer.use_cases.qa.retriever_based_qa.retrieverbasedqainput attribute)": [[7, "intelligence_layer.use_cases.qa.retriever_based_qa.RetrieverBasedQaInput.model_config"]], "model_config (intelligence_layer.use_cases.qa.single_chunk_qa.singlechunkqainput attribute)": [[7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaInput.model_config"]], "model_config (intelligence_layer.use_cases.qa.single_chunk_qa.singlechunkqaoutput attribute)": [[7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaOutput.model_config"]], "model_fields (intelligence_layer.use_cases.qa.long_context_qa.longcontextqainput attribute)": [[7, "intelligence_layer.use_cases.qa.long_context_qa.LongContextQaInput.model_fields"]], "model_fields (intelligence_layer.use_cases.qa.multiple_chunk_qa.multiplechunkqainput attribute)": [[7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaInput.model_fields"]], "model_fields (intelligence_layer.use_cases.qa.multiple_chunk_qa.multiplechunkqaoutput attribute)": [[7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaOutput.model_fields"]], "model_fields (intelligence_layer.use_cases.qa.multiple_chunk_qa.subanswer attribute)": [[7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.Subanswer.model_fields"]], "model_fields (intelligence_layer.use_cases.qa.retriever_based_qa.retrieverbasedqainput attribute)": [[7, "intelligence_layer.use_cases.qa.retriever_based_qa.RetrieverBasedQaInput.model_fields"]], "model_fields (intelligence_layer.use_cases.qa.single_chunk_qa.singlechunkqainput attribute)": [[7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaInput.model_fields"]], "model_fields (intelligence_layer.use_cases.qa.single_chunk_qa.singlechunkqaoutput attribute)": [[7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaOutput.model_fields"]], "question (intelligence_layer.use_cases.qa.long_context_qa.longcontextqainput attribute)": [[7, 
"id0"], [7, "intelligence_layer.use_cases.qa.long_context_qa.LongContextQaInput.question"]], "question (intelligence_layer.use_cases.qa.multiple_chunk_qa.multiplechunkqainput attribute)": [[7, "id4"], [7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaInput.question"]], "question (intelligence_layer.use_cases.qa.retriever_based_qa.retrieverbasedqainput attribute)": [[7, "id10"], [7, "intelligence_layer.use_cases.qa.retriever_based_qa.RetrieverBasedQaInput.question"]], "question (intelligence_layer.use_cases.qa.single_chunk_qa.singlechunkqainput attribute)": [[7, "id14"], [7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQaInput.question"]], "run() (intelligence_layer.use_cases.qa.long_context_qa.longcontextqa method)": [[7, "intelligence_layer.use_cases.qa.long_context_qa.LongContextQa.run"]], "run() (intelligence_layer.use_cases.qa.multiple_chunk_qa.multiplechunkqa method)": [[7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQa.run"]], "run() (intelligence_layer.use_cases.qa.retriever_based_qa.retrieverbasedqa method)": [[7, "intelligence_layer.use_cases.qa.retriever_based_qa.RetrieverBasedQa.run"]], "run() (intelligence_layer.use_cases.qa.single_chunk_qa.singlechunkqa method)": [[7, "intelligence_layer.use_cases.qa.single_chunk_qa.SingleChunkQa.run"]], "subanswers (intelligence_layer.use_cases.qa.multiple_chunk_qa.multiplechunkqaoutput attribute)": [[7, "id6"], [7, "intelligence_layer.use_cases.qa.multiple_chunk_qa.MultipleChunkQaOutput.subanswers"]], "text (intelligence_layer.use_cases.qa.long_context_qa.longcontextqainput attribute)": [[7, "id1"], [7, "intelligence_layer.use_cases.qa.long_context_qa.LongContextQaInput.text"]], "filtersearch (class in intelligence_layer.use_cases.search.filter_search)": [[8, "intelligence_layer.use_cases.search.filter_search.FilterSearch"]], "filtersearchinput (class in intelligence_layer.use_cases.search.filter_search)": [[8, "intelligence_layer.use_cases.search.filter_search.FilterSearchInput"]], "search (class in intelligence_layer.use_cases.search.search)": [[8, "intelligence_layer.use_cases.search.search.Search"]], "searchinput (class in intelligence_layer.use_cases.search.search)": [[8, "intelligence_layer.use_cases.search.search.SearchInput"]], "searchoutput (class in intelligence_layer.use_cases.search.search)": [[8, "intelligence_layer.use_cases.search.search.SearchOutput"]], "filter (intelligence_layer.use_cases.search.filter_search.filtersearchinput attribute)": [[8, "id0"], [8, "intelligence_layer.use_cases.search.filter_search.FilterSearchInput.filter"]], "intelligence_layer.use_cases.search": [[8, "module-intelligence_layer.use_cases.search"]], "intelligence_layer.use_cases.search.filter_search": [[8, "module-intelligence_layer.use_cases.search.filter_search"]], "intelligence_layer.use_cases.search.search": [[8, "module-intelligence_layer.use_cases.search.search"]], "model_config (intelligence_layer.use_cases.search.filter_search.filtersearchinput attribute)": [[8, "intelligence_layer.use_cases.search.filter_search.FilterSearchInput.model_config"]], "model_config (intelligence_layer.use_cases.search.search.searchinput attribute)": [[8, "intelligence_layer.use_cases.search.search.SearchInput.model_config"]], "model_config (intelligence_layer.use_cases.search.search.searchoutput attribute)": [[8, "intelligence_layer.use_cases.search.search.SearchOutput.model_config"]], "model_fields (intelligence_layer.use_cases.search.filter_search.filtersearchinput attribute)": [[8, 
"intelligence_layer.use_cases.search.filter_search.FilterSearchInput.model_fields"]], "model_fields (intelligence_layer.use_cases.search.search.searchinput attribute)": [[8, "intelligence_layer.use_cases.search.search.SearchInput.model_fields"]], "model_fields (intelligence_layer.use_cases.search.search.searchoutput attribute)": [[8, "intelligence_layer.use_cases.search.search.SearchOutput.model_fields"]], "query (intelligence_layer.use_cases.search.filter_search.filtersearchinput attribute)": [[8, "id1"], [8, "intelligence_layer.use_cases.search.filter_search.FilterSearchInput.query"]], "query (intelligence_layer.use_cases.search.search.searchinput attribute)": [[8, "id2"], [8, "intelligence_layer.use_cases.search.search.SearchInput.query"]], "results (intelligence_layer.use_cases.search.search.searchoutput attribute)": [[8, "id3"], [8, "intelligence_layer.use_cases.search.search.SearchOutput.results"]], "run() (intelligence_layer.use_cases.search.filter_search.filtersearch method)": [[8, "intelligence_layer.use_cases.search.filter_search.FilterSearch.run"]], "run() (intelligence_layer.use_cases.search.search.search method)": [[8, "intelligence_layer.use_cases.search.search.Search.run"]], "instruction (intelligence_layer.use_cases.summarize.summarize.shortbodysummarize attribute)": [[9, "id0"], [9, "intelligence_layer.use_cases.summarize.summarize.ShortBodySummarize.INSTRUCTION"]], "maximum_response_tokens (intelligence_layer.use_cases.summarize.summarize.shortbodysummarize attribute)": [[9, "id1"], [9, "intelligence_layer.use_cases.summarize.summarize.ShortBodySummarize.MAXIMUM_RESPONSE_TOKENS"]], "shortbodysummarize (class in intelligence_layer.use_cases.summarize.summarize)": [[9, "intelligence_layer.use_cases.summarize.summarize.ShortBodySummarize"]], "summarizeinput (class in intelligence_layer.use_cases.summarize.summarize)": [[9, "intelligence_layer.use_cases.summarize.summarize.SummarizeInput"]], "summarizeoutput (class in intelligence_layer.use_cases.summarize.summarize)": [[9, "intelligence_layer.use_cases.summarize.summarize.SummarizeOutput"]], "chunk (intelligence_layer.use_cases.summarize.summarize.summarizeinput attribute)": [[9, "id2"], [9, "intelligence_layer.use_cases.summarize.summarize.SummarizeInput.chunk"]], "highlights (intelligence_layer.use_cases.summarize.summarize.summarizeoutput attribute)": [[9, "id3"], [9, "intelligence_layer.use_cases.summarize.summarize.SummarizeOutput.highlights"]], "intelligence_layer.use_cases.summarize": [[9, "module-intelligence_layer.use_cases.summarize"]], "intelligence_layer.use_cases.summarize.summarize": [[9, "module-intelligence_layer.use_cases.summarize.summarize"]], "model_config (intelligence_layer.use_cases.summarize.summarize.summarizeinput attribute)": [[9, "intelligence_layer.use_cases.summarize.summarize.SummarizeInput.model_config"]], "model_config (intelligence_layer.use_cases.summarize.summarize.summarizeoutput attribute)": [[9, "intelligence_layer.use_cases.summarize.summarize.SummarizeOutput.model_config"]], "model_fields (intelligence_layer.use_cases.summarize.summarize.summarizeinput attribute)": [[9, "intelligence_layer.use_cases.summarize.summarize.SummarizeInput.model_fields"]], "model_fields (intelligence_layer.use_cases.summarize.summarize.summarizeoutput attribute)": [[9, "intelligence_layer.use_cases.summarize.summarize.SummarizeOutput.model_fields"]], "run() (intelligence_layer.use_cases.summarize.summarize.shortbodysummarize method)": [[9, 
"intelligence_layer.use_cases.summarize.summarize.ShortBodySummarize.run"]], "summary (intelligence_layer.use_cases.summarize.summarize.summarizeoutput attribute)": [[9, "id4"], [9, "intelligence_layer.use_cases.summarize.summarize.SummarizeOutput.summary"]]}}) \ No newline at end of file