From 55d5441b83ea714259ff6077b5f990940b30f7d4 Mon Sep 17 00:00:00 2001
From: Sarmad Qadri
Date: Thu, 16 Nov 2023 04:26:33 -0500
Subject: [PATCH] [Docs] Add "Run a prompt" section

---
 .../docs/introduction/getting-started.md    |   3 -
 .../docs/overview/ai-config-format.md       |   2 +-
 .../docs/overview/create-an-aiconfig.md     |   2 +-
 .../docs/overview/define-prompt-chain.md    |   1 -
 .../docs/overview/monitoring-aiconfig.md    |   9 +
 aiconfig-docs/docs/overview/parameters.md   |   4 +-
 aiconfig-docs/docs/overview/run-aiconfig.md | 210 +++++++++++++++++-
 aiconfig-docs/docs/overview/use-aiconfig.md |   5 -
 8 files changed, 220 insertions(+), 16 deletions(-)
 create mode 100644 aiconfig-docs/docs/overview/monitoring-aiconfig.md
 delete mode 100644 aiconfig-docs/docs/overview/use-aiconfig.md

diff --git a/aiconfig-docs/docs/introduction/getting-started.md b/aiconfig-docs/docs/introduction/getting-started.md
index 2c6987541..dd6c1176e 100644
--- a/aiconfig-docs/docs/introduction/getting-started.md
+++ b/aiconfig-docs/docs/introduction/getting-started.md
@@ -165,7 +165,6 @@ import * as path from "path";
 import { AIConfigRuntime, InferenceOptions } from "aiconfig";
 
 async function travelWithGPT() {
-  // Alternatively, you can use AIConfigRuntime.loadJSON({/*travel.aiconfig.json contents*/})
   const aiConfig = AIConfigRuntime.load(
     path.join(__dirname, "travel.aiconfig.json")
   );
@@ -189,8 +188,6 @@ async function travelWithGPT() {
 ```python title="app.py"
 from aiconfig import AIConfigRuntime, InferenceOptions
-
-# Load the aiconfig. You can also use AIConfigRuntime.loadJSON({})
 config = AIConfigRuntime.load('travel.aiconfig.json')
 
 # Run a single prompt (with streaming)
diff --git a/aiconfig-docs/docs/overview/ai-config-format.md b/aiconfig-docs/docs/overview/ai-config-format.md
index a75892a4c..bf22e8986 100644
--- a/aiconfig-docs/docs/overview/ai-config-format.md
+++ b/aiconfig-docs/docs/overview/ai-config-format.md
@@ -5,7 +5,7 @@ sidebar_position: 1
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
-# The AIConfig Format
+# AIConfig Specification
 
 ## Introduction
 
diff --git a/aiconfig-docs/docs/overview/create-an-aiconfig.md b/aiconfig-docs/docs/overview/create-an-aiconfig.md
index ac53ea51b..0e85ecfc5 100644
--- a/aiconfig-docs/docs/overview/create-an-aiconfig.md
+++ b/aiconfig-docs/docs/overview/create-an-aiconfig.md
@@ -6,7 +6,7 @@ import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 import constants from '@site/core/tabConstants';
 
-# Create an `aiconfig`
+# Create an AIConfig
 
 There are 2 ways to create an `aiconfig` from scratch.
 
diff --git a/aiconfig-docs/docs/overview/define-prompt-chain.md b/aiconfig-docs/docs/overview/define-prompt-chain.md
index fe46f5a85..a9f31a9d9 100644
--- a/aiconfig-docs/docs/overview/define-prompt-chain.md
+++ b/aiconfig-docs/docs/overview/define-prompt-chain.md
@@ -184,7 +184,6 @@ config = AIConfigRuntime.load('travel.aiconfig.json')
 
 # and then use its output to run the gen_itinerary using GPT-4
 await config.run(
     "gen_itinerary",
-    params=None,
     options=InferenceOptions(stream=True),
     run_with_dependencies=True)
diff --git a/aiconfig-docs/docs/overview/monitoring-aiconfig.md b/aiconfig-docs/docs/overview/monitoring-aiconfig.md
new file mode 100644
index 000000000..c61361729
--- /dev/null
+++ b/aiconfig-docs/docs/overview/monitoring-aiconfig.md
@@ -0,0 +1,9 @@
+---
+sidebar_position: 7
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import constants from '@site/core/tabConstants';
+
+# Tracing and Monitoring
diff --git a/aiconfig-docs/docs/overview/parameters.md b/aiconfig-docs/docs/overview/parameters.md
index 52189105b..3a88e3184 100644
--- a/aiconfig-docs/docs/overview/parameters.md
+++ b/aiconfig-docs/docs/overview/parameters.md
@@ -6,7 +6,7 @@ import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 import constants from '@site/core/tabConstants';
 
-# Passing data into prompts
+# Passing Data into Prompts
 
 Passing data into prompts is fundamental to `aiconfig`. It allows you to store prompt _templates_ as the config, and resolve the template into a prompt by passing in data.
@@ -298,7 +298,7 @@ params = {
 }
 
 # Run the prompt chain
-await config.run("translate", params, options=None, run_with_dependencies=True)
+await config.run("translate", params, run_with_dependencies=True)
diff --git a/aiconfig-docs/docs/overview/run-aiconfig.md b/aiconfig-docs/docs/overview/run-aiconfig.md
index fafef43b9..8d6e14eff 100644
--- a/aiconfig-docs/docs/overview/run-aiconfig.md
+++ b/aiconfig-docs/docs/overview/run-aiconfig.md
@@ -2,10 +2,214 @@
 sidebar_position: 5
 ---
 
-# Run an AIConfig
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import constants from '@site/core/tabConstants';
+
+# Run a Prompt
+
+:::tip
+**TL;DR**: Call `config.run("prompt_name")`.
+
+If you want to re-run the transitive closure of dependencies in a prompt chain, call `config.run("prompt_name", params, options, run_with_dependencies=True)`.
+:::
+
+Once you've [created an `aiconfig`](/docs/overview/create-an-aiconfig) and defined your prompts and [prompt chains](/docs/overview/define-prompt-chain), it's time to run them.
+
+Running a prompt means invoking model inference for that prompt. The interface for running a prompt is the same no matter what underlying model is being invoked. This is one of the things that makes `aiconfig` powerful -- by removing model-specific logic from your application code, it streamlines your application and helps you iterate faster.
+
+<Tabs groupId="aiconfig-lang">
+<TabItem value="node" label="Node.js (TypeScript)">
+
+```typescript title="app.ts"
+import * as path from "path";
+import { AIConfigRuntime } from "aiconfig";
+
+async function runPrompt() {
+  const config = AIConfigRuntime.load(path.join(__dirname, "aiconfig.json"));
+  const result = await config.run("prompt_name");
+  return result;
+}
+```
+
+</TabItem>
+<TabItem value="python" label="Python">
+
+```python title="app.py"
+from aiconfig import AIConfigRuntime
+
+config = AIConfigRuntime.load('aiconfig.json')
+result = await config.run("prompt_name")
+```
+
+</TabItem>
+</Tabs>
+
+Under the covers, the `run` function does a few things (see the sketch after this list):
+
+- It deserializes the given prompt into the data type expected by the model's inference endpoint.
+- It applies model settings specified in the prompt and in the global [metadata](/docs/overview/ai-config-format#metadata).
+- It passes data into the prompt using [parameters](/docs/overview/parameters) specified in the `run` call.
+- It calls the model's inference endpoint with the fully resolved arguments, in the shape expected by the model.
+- Finally, it caches the resulting outputs in the `AIConfigRuntime` object.
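+
+For example, you can observe the first and last of these steps directly. Here is a minimal sketch in Python -- it assumes your installed SDK version exposes the async `resolve` helper and the `get_output_text` helper on `AIConfigRuntime`:
+
+```python title="app.py"
+from aiconfig import AIConfigRuntime
+
+config = AIConfigRuntime.load('aiconfig.json')
+
+# Inspect the fully resolved arguments that `run` would send to the model
+# (assumes an async `resolve` helper is available in your SDK version)
+completion_params = await config.resolve("prompt_name")
+print(completion_params)
+
+# After running, the output is cached on the AIConfigRuntime object
+await config.run("prompt_name")
+print(config.get_output_text("prompt_name"))
+```
+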
 ## Run a single Prompt
 
+To run a single prompt, call `config.run("prompt_name")`. The request is routed to the model specified in that prompt's metadata.
+
+:::note
+A nice side effect: you can swap out the `aiconfig` used by your application, changing the underlying models and settings, without ever needing to update your application code!
+:::
+
+## Run a Prompt chain
+
+### Running with cached outputs
+
+Consider the following example `aiconfig`. `gen_itinerary` is a prompt chain that depends on the output of `get_activities`.
+
+```json
+{
+  "name": "NYC Trip Planner",
+  "description": "Intrepid explorer with ChatGPT and AIConfig",
+  "schema_version": "latest",
+  "metadata": {},
+  "prompts": [
+    {
+      "name": "get_activities",
+      "input": "Tell me 10 fun attractions to do in NYC.",
+      "metadata": {
+        "model": "gpt-3.5-turbo"
+      }
+    },
+    {
+      "name": "gen_itinerary",
+      "input": "Generate an itinerary ordered by geographic location for these activities: {{get_activities.output}}.",
+      "metadata": {
+        "model": "gpt-4"
+      }
+    }
+  ]
+}
+```
+
+By default, running `gen_itinerary` uses the cached output of `get_activities` instead of re-running it.
+
+<Tabs groupId="aiconfig-lang">
+<TabItem value="node" label="Node.js (TypeScript)">
+
+```typescript title="app.ts"
+import * as path from "path";
+import { AIConfigRuntime } from "aiconfig";
+
+async function travelWithGPT() {
+  const config = AIConfigRuntime.load(
+    path.join(__dirname, "travel.aiconfig.json")
+  );
+
+  await config.run("get_activities");
+
+  // Uses the cached output for `get_activities` to resolve the `gen_itinerary` prompt
+  await config.run("gen_itinerary");
+}
+```
+
+</TabItem>
+<TabItem value="python" label="Python">
+
+```python title="app.py"
+from aiconfig import AIConfigRuntime
+
+config = AIConfigRuntime.load('travel.aiconfig.json')
+
+await config.run("get_activities")
+
+# Uses the cached output for `get_activities` to resolve the `gen_itinerary` prompt
+await config.run("gen_itinerary")
+```
+
+</TabItem>
+</Tabs>
 
-## Run with Dependencies
+### Re-running the entire chain
+
+Running with dependencies is useful for re-executing [prompt chains](/docs/overview/define-prompt-chain) from scratch.
+
+<Tabs groupId="aiconfig-lang">
+<TabItem value="node" label="Node.js (TypeScript)">
+
+```typescript title="app.ts"
+import * as path from "path";
+import { AIConfigRuntime } from "aiconfig";
+
+async function travelWithGPT() {
+  const config = AIConfigRuntime.load(
+    path.join(__dirname, "travel.aiconfig.json")
+  );
+
+  // Re-runs `get_activities` first, and then uses its output to resolve the `gen_itinerary` prompt
+  await config.runWithDependencies("gen_itinerary");
+}
+```
+
+</TabItem>
+<TabItem value="python" label="Python">
+
+```python title="app.py"
+from aiconfig import AIConfigRuntime
+
+config = AIConfigRuntime.load('travel.aiconfig.json')
+
+# Re-runs `get_activities` first, and then uses its output to resolve the `gen_itinerary` prompt
+await config.run("gen_itinerary", run_with_dependencies=True)
+```
+
+</TabItem>
+</Tabs>
+
+## Streaming outputs
+
+The `run` API makes it easy to stream outputs in a consistent way across any model that supports it.
+
+You can pass in an `InferenceOptions` object, which allows you to specify a streaming callback:
+
+<Tabs groupId="aiconfig-lang">
+<TabItem value="node" label="Node.js (TypeScript)">
+
+```typescript title="app.ts"
+import * as path from "path";
+import { AIConfigRuntime, InferenceOptions } from "aiconfig";
+
+async function streamOutputs() {
+  const aiConfig = AIConfigRuntime.load(
+    path.join(__dirname, "travel.aiconfig.json")
+  );
+
+  const options: InferenceOptions = {
+    callbacks: {
+      streamCallback: (data: any, _acc: any, _idx: any) => {
+        // Write streamed content to console
+        process.stdout.write(data?.content || "\n");
+      },
+    },
+  };
+
+  // Run a single prompt with streaming
+  await aiConfig.run("get_activities", /*params*/ undefined, options);
+}
+```
+
+</TabItem>
+<TabItem value="python" label="Python">
+
+```python title="app.py"
+from aiconfig import AIConfigRuntime, InferenceOptions
+
+config = AIConfigRuntime.load('travel.aiconfig.json')
+
+# stream=True uses the default console streaming callback
+inference_options = InferenceOptions(stream=True)
+await config.run("get_activities", options=inference_options)
+```
+
+</TabItem>
+</Tabs>
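+
+The Python example above uses the default console callback. For more control, here is a rough sketch of a custom callback -- it assumes `InferenceOptions` accepts a `stream_callback(data, accumulated_data, index)` mirroring the TypeScript `callbacks.streamCallback` above:
+
+```python title="app.py"
+from aiconfig import AIConfigRuntime, InferenceOptions
+
+config = AIConfigRuntime.load('travel.aiconfig.json')
+
+def stream_to_console(data, accumulated_data, index):
+    # The chunk shape depends on the model parser: handle both raw strings
+    # and dict-like chunks with a `content` field
+    chunk = data if isinstance(data, str) else data.get("content", "")
+    print(chunk, end="", flush=True)
+
+# Assumption: `stream_callback` mirrors the TypeScript callback API
+inference_options = InferenceOptions(stream=True, stream_callback=stream_to_console)
+await config.run("get_activities", options=inference_options)
+```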
+
+## Passing data into prompts
 
-## Run All
+
+You can pass data into prompts using parameters. See the [parameters guide](/docs/overview/parameters) to learn more.
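+
+For example, if the `get_activities` prompt template referenced a `{{city}}` parameter (a hypothetical name, used here for illustration), you could supply a value at run time:
+
+```python title="app.py"
+from aiconfig import AIConfigRuntime
+
+config = AIConfigRuntime.load('travel.aiconfig.json')
+
+# Hypothetical: assumes the prompt template contains a {{city}} handlebar
+params = {"city": "New York City"}
+await config.run("get_activities", params)
+```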
diff --git a/aiconfig-docs/docs/overview/use-aiconfig.md b/aiconfig-docs/docs/overview/use-aiconfig.md
deleted file mode 100644
index 58cf89e8d..000000000
--- a/aiconfig-docs/docs/overview/use-aiconfig.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-sidebar_position: 7
----
-
-# Using AIConfig in your application