diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts
index 67ed1b664a..e645da9ad4 100644
--- a/packages/core/src/generation.ts
+++ b/packages/core/src/generation.ts
@@ -34,6 +34,7 @@ import {
     ServiceType,
     SearchResponse,
     ActionResponse,
+    TelemetrySettings,
 } from "./types.ts";
 
 import { fal } from "@fal-ai/client";
@@ -164,6 +165,9 @@ export async function generateText({
     const max_response_length =
         modelConfiguration?.max_response_length ||
         models[provider].settings.maxOutputTokens;
+    const experimental_telemetry =
+        modelConfiguration?.experimental_telemetry ||
+        models[provider].settings.experimental_telemetry;
 
     const apiKey = runtime.token;
 
@@ -209,6 +213,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = openaiResponse;
@@ -232,6 +237,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = googleResponse;
@@ -258,6 +264,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = anthropicResponse;
@@ -284,6 +291,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = anthropicResponse;
@@ -314,6 +322,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = grokResponse;
@@ -335,6 +344,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = groqResponse;
@@ -386,6 +396,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = redpillResponse;
@@ -413,6 +424,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = openrouterResponse;
@@ -439,6 +451,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = ollamaResponse;
@@ -466,6 +479,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = heuristResponse;
@@ -515,6 +529,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = openaiResponse;
@@ -541,6 +556,7 @@ export async function generateText({
                     maxTokens: max_response_length,
                     frequencyPenalty: frequency_penalty,
                     presencePenalty: presence_penalty,
+                    experimental_telemetry: experimental_telemetry,
                 });
 
                 response = galadrielResponse;
@@ -1357,6 +1373,7 @@ interface ModelSettings {
     frequencyPenalty: number;
     presencePenalty: number;
     stop?: string[];
+    experimental_telemetry?: TelemetrySettings;
 }
 
 /**
@@ -1392,6 +1409,7 @@ export const generateObject = async ({
     const presence_penalty = models[provider].settings.presence_penalty;
     const max_context_length = models[provider].settings.maxInputTokens;
     const max_response_length = models[provider].settings.maxOutputTokens;
+    const experimental_telemetry = models[provider].settings.experimental_telemetry;
     const apiKey = runtime.token;
 
     try {
@@ -1404,6 +1422,7 @@ export const generateObject = async ({
             frequencyPenalty: frequency_penalty,
             presencePenalty: presence_penalty,
             stop: stop || models[provider].settings.stop,
+            experimental_telemetry: experimental_telemetry,
         };
 
         const response = await handleProvider({
diff --git a/packages/core/src/types.ts b/packages/core/src/types.ts
index dfc19c2eb2..dfb82ff8a5 100644
--- a/packages/core/src/types.ts
+++ b/packages/core/src/types.ts
@@ -165,6 +165,9 @@ export type Model = {
 
         /** Temperature setting */
         temperature: number;
+
+        /** Optional telemetry configuration (experimental) */
+        experimental_telemetry?: TelemetrySettings;
     };
 
     /** Optional image generation settings */
@@ -628,12 +631,38 @@ export interface IAgentConfig {
     [key: string]: string;
 }
 
+export type TelemetrySettings = {
+    /**
+     * Enable or disable telemetry. Disabled by default while experimental.
+     */
+    isEnabled?: boolean;
+    /**
+     * Enable or disable input recording. Enabled by default.
+     *
+     * You might want to disable input recording to avoid recording sensitive
+     * information, to reduce data transfers, or to increase performance.
+     */
+    recordInputs?: boolean;
+    /**
+     * Enable or disable output recording. Enabled by default.
+     *
+     * You might want to disable output recording to avoid recording sensitive
+     * information, to reduce data transfers, or to increase performance.
+     */
+    recordOutputs?: boolean;
+    /**
+     * Identifier for this function. Used to group telemetry data by function.
+     */
+    functionId?: string;
+};
+
 export interface ModelConfiguration {
     temperature?: number;
     max_response_length?: number;
     frequency_penalty?: number;
     presence_penalty?: number;
     maxInputTokens?: number;
+    experimental_telemetry?: TelemetrySettings;
 }
 
 /**
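
For reviewers, a minimal sketch of how a caller might opt in to the new setting. Only the `TelemetrySettings` and `ModelConfiguration` shapes come from this diff; the concrete values and the standalone `telemetry` constant are illustrative assumptions, not part of the change:

```ts
import type { ModelConfiguration, TelemetrySettings } from "./types.ts";

// Hypothetical per-character telemetry override.
const telemetry: TelemetrySettings = {
    isEnabled: true, // must opt in explicitly; disabled by default while experimental
    recordInputs: false, // keep sensitive prompt content out of traces
    recordOutputs: true,
    functionId: "generateText", // groups telemetry data by call site
};

const modelConfiguration: ModelConfiguration = {
    temperature: 0.7,
    experimental_telemetry: telemetry,
};

// Resolution order mirrors the generateText hunk above: a character-level
// override wins, otherwise the provider's model defaults apply:
//
//     const experimental_telemetry =
//         modelConfiguration?.experimental_telemetry ||
//         models[provider].settings.experimental_telemetry;
```

The resolved object is then passed straight through to `aiGenerateText` at every provider call site, so the field names intentionally match the `experimental_telemetry` option that the underlying AI SDK's `generateText` accepts.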