Skip to content

Commit

Permalink
1/n align deserialize() to library design
Browse files Browse the repository at this point in the history
## What

Exclude previous run outputs when performing `config.resolve()`, i.e., the model parser's `.deserialize()`

## Why

When resolving (deserializing) a prompt, if the prompt was executed before, the output from the previous execution is currently included in the deserialized state. While this was the expected behaviour in the past, it has become apparent that including previous outputs is counterintuitive and not user-friendly.
  • Loading branch information
Ankush Pala [email protected] committed Dec 5, 2023
1 parent 72f7a0e commit b9168ec
Show file tree
Hide file tree
Showing 2 changed files with 16 additions and 10 deletions.
7 changes: 4 additions & 3 deletions python/src/aiconfig/default_parsers/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,7 @@ async def deserialize(
)

# Add in the latest prompt
add_prompt_as_message(prompt, aiconfig, completion_params["messages"], params)
add_prompt_as_message(prompt, aiconfig, completion_params["messages"], params, is_last_prompt=True)
await aiconfig.callback_manager.run_callbacks(
CallbackEvent(
"on_deserialize_complete", __name__, {"output": completion_params}
Expand Down Expand Up @@ -452,7 +452,7 @@ def refine_chat_completion_params(model_settings):


def add_prompt_as_message(
prompt: Prompt, aiconfig: "AIConfigRuntime", messages: List, params=None
prompt: Prompt, aiconfig: "AIConfigRuntime", messages: List, params=None, is_last_prompt: bool = False
):
"""
Converts a given prompt to a message and adds it to the specified messages list.
Expand Down Expand Up @@ -491,7 +491,8 @@ def add_prompt_as_message(
messages.append(message_data)

output = aiconfig.get_latest_output(prompt)
if output:
# Avoid deserializing the last prompt's output. The output from the previous execution should not be included.
if output and is_last_prompt is not True:
if output.output_type == "execute_result":
output_message = output.data
if output_message["role"] == "assistant":
Expand Down
19 changes: 12 additions & 7 deletions typescript/lib/parsers/openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -454,16 +454,19 @@ export class OpenAIChatModelParser extends ParameterizedModelParser<Chat.ChatCom
aiConfig.getModelName(currentPrompt) ===
aiConfig.getModelName(prompt)
) {
if (currentPrompt.name === prompt.name) {
// Add Last Prompt. We have reached the end of the chat history
this.addPromptAsMessage(currentPrompt, aiConfig, messages, params, true);
break;
}
this.addPromptAsMessage(currentPrompt, aiConfig, messages, params);
}

if (currentPrompt.name === prompt.name) {
// If this is the current prompt, then we have reached the end of the chat history
break;
}

}
} else {
this.addPromptAsMessage(prompt, aiConfig, messages, params);
// If we don't want to remember the chat context, then we only need to add the latest prompt as a message
this.addPromptAsMessage(prompt, aiConfig, messages, params, true);
}

// Update the completion params with the resolved messages
Expand Down Expand Up @@ -641,7 +644,8 @@ export class OpenAIChatModelParser extends ParameterizedModelParser<Chat.ChatCom
prompt: Prompt,
aiConfig: AIConfigRuntime,
messages: Chat.ChatCompletionMessageParam[],
params?: JSONObject
params?: JSONObject,
isLastPrompt: Boolean = false,
) {
// Resolve the prompt with the given parameters, and add it to the messages array
const promptTemplate = this.getPromptTemplate(prompt, aiConfig);
Expand All @@ -668,7 +672,8 @@ export class OpenAIChatModelParser extends ParameterizedModelParser<Chat.ChatCom
}

const output = aiConfig.getLatestOutput(prompt);
if (output != null) {
// Avoid deserializing the last prompt's output. The output from the previous execution should not be included.
if (output != null && isLastPrompt !== true) {
if (output.output_type === "execute_result") {
const outputMessage =
output.data as unknown as Chat.ChatCompletionMessageParam;
Expand Down

0 comments on commit b9168ec

Please sign in to comment.